Auto merge of #147640 - matthiaskrgr:rollup-fio3d88, r=matthiaskrgr

Rollup of 7 pull requests

Successful merges:

 - rust-lang/rust#144266 (Suppress swapping lhs and rhs in equality suggestion in extern macro)
 - rust-lang/rust#147471 (Assert that non-extended temporaries and `super let` bindings have scopes)
 - rust-lang/rust#147533 (Renumber return local after state transform)
 - rust-lang/rust#147566 (rewrite outlives placeholder constraints to outlives static when handling opaque types)
 - rust-lang/rust#147613 (Make logging filters work again by moving EnvFilter into its own layer)
 - rust-lang/rust#147615 (reduce calls to attr.span() in old doc attr parsing)
 - rust-lang/rust#147636 (miri subtree update)

r? `@ghost`
`@rustbot` modify labels: rollup
This commit is contained in:
bors 2025-10-13 19:22:06 +00:00
commit 4b94758d2b
252 changed files with 4129 additions and 913 deletions

View file

@ -344,7 +344,7 @@ pub(crate) fn compute_sccs_applying_placeholder_outlives_constraints<'tcx>(
}
}
fn rewrite_placeholder_outlives<'tcx>(
pub(crate) fn rewrite_placeholder_outlives<'tcx>(
sccs: &Sccs<RegionVid, ConstraintSccIndex>,
annotations: &SccAnnotations<'_, '_, RegionTracker>,
fr_static: RegionVid,

View file

@ -39,7 +39,7 @@ pub(super) fn apply_member_constraints<'tcx>(
debug!(?member_constraints);
for scc_a in rcx.constraint_sccs.all_sccs() {
debug!(?scc_a);
// Start by adding the region values required by outlives constraints. This
// Start by adding the region values required by outlives constraints. This
// matches how we compute the final region values in `fn compute_regions`.
//
// We need to do this here to get a lower bound when applying member constraints.
@ -64,6 +64,7 @@ fn apply_member_constraint<'tcx>(
// If the member region lives in a higher universe, we currently choose
// the most conservative option by leaving it unchanged.
if !rcx.max_placeholder_universe_reached(member).is_root() {
debug!("member region reached non root universe, bailing");
return;
}

View file

@ -253,6 +253,10 @@ fn collect_defining_uses<'tcx>(
}
} else {
errors.push(DeferredOpaqueTypeError::InvalidOpaqueTypeArgs(err));
debug!(
"collect_defining_uses: InvalidOpaqueTypeArgs for {:?} := {:?}",
non_nll_opaque_type_key, hidden_type
);
}
continue;
}
@ -276,6 +280,7 @@ fn collect_defining_uses<'tcx>(
defining_uses
}
#[instrument(level = "debug", skip(rcx, hidden_types, defining_uses, errors))]
fn compute_definition_site_hidden_types_from_defining_uses<'tcx>(
rcx: &RegionCtxt<'_, 'tcx>,
hidden_types: &mut DefinitionSiteHiddenTypes<'tcx>,
@ -287,6 +292,7 @@ fn compute_definition_site_hidden_types_from_defining_uses<'tcx>(
let mut decls_modulo_regions: FxIndexMap<OpaqueTypeKey<'tcx>, (OpaqueTypeKey<'tcx>, Span)> =
FxIndexMap::default();
for &DefiningUse { opaque_type_key, ref arg_regions, hidden_type } in defining_uses {
debug!(?opaque_type_key, ?arg_regions, ?hidden_type);
// After applying member constraints, we now map all regions in the hidden type
// to the `arg_regions` of this defining use. In case a region in the hidden type
// ended up not being equal to any such region, we error.
@ -294,6 +300,7 @@ fn compute_definition_site_hidden_types_from_defining_uses<'tcx>(
match hidden_type.try_fold_with(&mut ToArgRegionsFolder::new(rcx, arg_regions)) {
Ok(hidden_type) => hidden_type,
Err(r) => {
debug!("UnexpectedHiddenRegion: {:?}", r);
errors.push(DeferredOpaqueTypeError::UnexpectedHiddenRegion {
hidden_type,
opaque_type_key,

View file

@ -11,7 +11,9 @@ use crate::constraints::ConstraintSccIndex;
use crate::handle_placeholders::{SccAnnotations, region_definitions};
use crate::region_infer::reverse_sccs::ReverseSccGraph;
use crate::region_infer::values::RegionValues;
use crate::region_infer::{ConstraintSccs, RegionDefinition, RegionTracker, Representative};
use crate::region_infer::{
ConstraintSccs, OutlivesConstraintSet, RegionDefinition, RegionTracker, Representative,
};
use crate::type_check::MirTypeckRegionConstraints;
use crate::type_check::free_region_relations::UniversalRegionRelations;
use crate::universal_regions::UniversalRegions;
@ -39,16 +41,36 @@ impl<'a, 'tcx> RegionCtxt<'a, 'tcx> {
location_map: Rc<DenseLocationMap>,
constraints: &MirTypeckRegionConstraints<'tcx>,
) -> RegionCtxt<'a, 'tcx> {
let mut outlives_constraints = constraints.outlives_constraints.clone();
let universal_regions = &universal_region_relations.universal_regions;
let (definitions, _has_placeholders) = region_definitions(infcx, universal_regions);
let compute_sccs =
|outlives_constraints: &OutlivesConstraintSet<'tcx>,
annotations: &mut SccAnnotations<'_, 'tcx, RegionTracker>| {
ConstraintSccs::new_with_annotation(
&outlives_constraints
.graph(definitions.len())
.region_graph(outlives_constraints, universal_regions.fr_static),
annotations,
)
};
let mut scc_annotations = SccAnnotations::init(&definitions);
let constraint_sccs = ConstraintSccs::new_with_annotation(
&constraints
.outlives_constraints
.graph(definitions.len())
.region_graph(&constraints.outlives_constraints, universal_regions.fr_static),
&mut scc_annotations,
let mut constraint_sccs = compute_sccs(&outlives_constraints, &mut scc_annotations);
let added_constraints = crate::handle_placeholders::rewrite_placeholder_outlives(
&constraint_sccs,
&scc_annotations,
universal_regions.fr_static,
&mut outlives_constraints,
);
if added_constraints {
scc_annotations = SccAnnotations::init(&definitions);
constraint_sccs = compute_sccs(&outlives_constraints, &mut scc_annotations);
}
let scc_annotations = scc_annotations.scc_to_annotation;
// Unlike the `RegionInferenceContext`, we only care about free regions

View file

@ -498,7 +498,8 @@ fn resolve_local<'tcx>(
// Iterate up to the enclosing destruction scope to find the same scope that will also
// be used for the result of the block itself.
if let Some(inner_scope) = visitor.cx.var_parent {
(visitor.cx.var_parent, _) = visitor.scope_tree.default_temporary_scope(inner_scope)
visitor.cx.var_parent =
Some(visitor.scope_tree.default_temporary_scope(inner_scope).0)
}
// Don't lifetime-extend child `super let`s or block tail expressions' temporaries in
// the initializer when this `super let` is not itself extended by a parent `let`
@ -752,10 +753,10 @@ impl<'tcx> Visitor<'tcx> for ScopeResolutionVisitor<'tcx> {
// The body of every fn is a root scope.
resolve_expr(this, body.value, true);
} else {
// Only functions have an outer terminating (drop) scope, while
// temporaries in constant initializers may be 'static, but only
// according to rvalue lifetime semantics, using the same
// syntactical rules used for let initializers.
// All bodies have an outer temporary drop scope, but temporaries
// and `super let` bindings in constant initializers may be extended
// to have 'static lifetimes, using the same syntactical rules used
// for `let` initializers.
//
// e.g., in `let x = &f();`, the temporary holding the result from
// the `f()` call lives for the entirety of the surrounding block.

View file

@ -3636,7 +3636,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.must_apply_modulo_regions()
{
let sm = self.tcx.sess.source_map();
if let Ok(rhs_snippet) = sm.span_to_snippet(rhs_expr.span)
// If the span of rhs_expr or lhs_expr is in an external macro,
// we just suppress the suggestion. See issue #139050
if !rhs_expr.span.in_external_macro(sm)
&& !lhs_expr.span.in_external_macro(sm)
&& let Ok(rhs_snippet) = sm.span_to_snippet(rhs_expr.span)
&& let Ok(lhs_snippet) = sm.span_to_snippet(lhs_expr.span)
{
err.note(format!("`{rhs_ty}` implements `PartialEq<{lhs_ty}>`"));

View file

@ -39,11 +39,11 @@ use std::io::{self, IsTerminal};
use tracing::dispatcher::SetGlobalDefaultError;
use tracing::{Event, Subscriber};
use tracing_subscriber::Registry;
use tracing_subscriber::filter::{Directive, EnvFilter, LevelFilter};
use tracing_subscriber::fmt::FmtContext;
use tracing_subscriber::fmt::format::{self, FormatEvent, FormatFields};
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::{Layer, Registry};
/// The values of all the environment variables that matter for configuring a logger.
/// Errors are explicitly preserved so that we can share error handling.
@ -152,18 +152,19 @@ where
}
}
let subscriber = build_subscriber().with(layer.with_filter(filter));
let subscriber = build_subscriber();
// NOTE: It is important to make sure that the filter is applied on the last layer
match cfg.backtrace {
Ok(backtrace_target) => {
let fmt_layer = tracing_subscriber::fmt::layer()
.with_writer(io::stderr)
.without_time()
.event_format(BacktraceFormatter { backtrace_target });
let subscriber = subscriber.with(fmt_layer);
let subscriber = subscriber.with(layer).with(fmt_layer).with(filter);
tracing::subscriber::set_global_default(subscriber)?;
}
Err(_) => {
tracing::subscriber::set_global_default(subscriber)?;
tracing::subscriber::set_global_default(subscriber.with(layer).with(filter))?;
}
};

View file

@ -16,7 +16,7 @@ use rustc_macros::{HashStable, TyDecodable, TyEncodable};
use rustc_span::{DUMMY_SP, Span};
use tracing::debug;
use crate::ty::TyCtxt;
use crate::ty::{self, TyCtxt};
/// Represents a statically-describable scope that can be used to
/// bound the lifetime/region for values.
@ -302,8 +302,8 @@ impl ScopeTree {
/// Returns the scope of non-lifetime-extended temporaries within a given scope, as well as
/// whether we've recorded a potential backwards-incompatible change to lint on.
/// Returns `None` when no enclosing temporary scope is found, such as for static items.
pub fn default_temporary_scope(&self, inner: Scope) -> (Option<Scope>, Option<Scope>) {
/// Panics if no enclosing temporary scope is found.
pub fn default_temporary_scope(&self, inner: Scope) -> (Scope, Option<Scope>) {
let mut id = inner;
let mut backwards_incompatible = None;
@ -311,11 +311,11 @@ impl ScopeTree {
match p.data {
ScopeData::Destruction => {
debug!("temporary_scope({inner:?}) = {id:?} [enclosing]");
return (Some(id), backwards_incompatible);
return (id, backwards_incompatible);
}
ScopeData::IfThenRescope | ScopeData::MatchGuard => {
debug!("temporary_scope({inner:?}) = {p:?} [enclosing]");
return (Some(p), backwards_incompatible);
return (p, backwards_incompatible);
}
ScopeData::Node
| ScopeData::CallSite
@ -335,7 +335,6 @@ impl ScopeTree {
}
}
debug!("temporary_scope({inner:?}) = None");
(None, backwards_incompatible)
span_bug!(ty::tls::with(|tcx| inner.span(tcx, self)), "no enclosing temporary scope")
}
}

View file

@ -31,12 +31,10 @@ impl RvalueScopes {
return (s, None);
}
// Otherwise, locate the innermost terminating scope
// if there's one. Static items, for instance, won't
// have an enclosing scope, hence no scope will be
// returned.
region_scope_tree
.default_temporary_scope(Scope { local_id: expr_id, data: ScopeData::Node })
// Otherwise, locate the innermost terminating scope.
let (scope, backward_incompatible) = region_scope_tree
.default_temporary_scope(Scope { local_id: expr_id, data: ScopeData::Node });
(Some(scope), backward_incompatible)
}
/// Make an association between a sub-expression and an extended lifetime

View file

@ -68,7 +68,7 @@ use rustc_hir::lang_items::LangItem;
use rustc_hir::{CoroutineDesugaring, CoroutineKind};
use rustc_index::bit_set::{BitMatrix, DenseBitSet, GrowableBitSet};
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::util::Discr;
use rustc_middle::ty::{
@ -110,6 +110,8 @@ impl<'tcx> MutVisitor<'tcx> for RenameLocalVisitor<'tcx> {
fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
if *local == self.from {
*local = self.to;
} else if *local == self.to {
*local = self.from;
}
}
@ -159,6 +161,7 @@ impl<'tcx> MutVisitor<'tcx> for SelfArgVisitor<'tcx> {
}
}
#[tracing::instrument(level = "trace", skip(tcx))]
fn replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtxt<'tcx>) {
place.local = new_base.local;
@ -166,6 +169,7 @@ fn replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtx
new_projection.append(&mut place.projection.to_vec());
place.projection = tcx.mk_place_elems(&new_projection);
tracing::trace!(?place);
}
const SELF_ARG: Local = Local::from_u32(1);
@ -204,8 +208,8 @@ struct TransformVisitor<'tcx> {
// The set of locals that have no `StorageLive`/`StorageDead` annotations.
always_live_locals: DenseBitSet<Local>,
// The original RETURN_PLACE local
old_ret_local: Local,
// New local we just create to hold the `CoroutineState` value.
new_ret_local: Local,
old_yield_ty: Ty<'tcx>,
@ -270,6 +274,7 @@ impl<'tcx> TransformVisitor<'tcx> {
// `core::ops::CoroutineState` only has single element tuple variants,
// so we can just write to the downcasted first field and then set the
// discriminant to the appropriate variant.
#[tracing::instrument(level = "trace", skip(self, statements))]
fn make_state(
&self,
val: Operand<'tcx>,
@ -341,13 +346,15 @@ impl<'tcx> TransformVisitor<'tcx> {
}
};
// Assign to `new_ret_local`, which will be replaced by `RETURN_PLACE` later.
statements.push(Statement::new(
source_info,
StatementKind::Assign(Box::new((Place::return_place(), rvalue))),
StatementKind::Assign(Box::new((self.new_ret_local.into(), rvalue))),
));
}
// Create a Place referencing a coroutine struct field
#[tracing::instrument(level = "trace", skip(self), ret)]
fn make_field(&self, variant_index: VariantIdx, idx: FieldIdx, ty: Ty<'tcx>) -> Place<'tcx> {
let self_place = Place::from(SELF_ARG);
let base = self.tcx.mk_place_downcast_unnamed(self_place, variant_index);
@ -358,6 +365,7 @@ impl<'tcx> TransformVisitor<'tcx> {
}
// Create a statement which changes the discriminant
#[tracing::instrument(level = "trace", skip(self))]
fn set_discr(&self, state_disc: VariantIdx, source_info: SourceInfo) -> Statement<'tcx> {
let self_place = Place::from(SELF_ARG);
Statement::new(
@ -370,6 +378,7 @@ impl<'tcx> TransformVisitor<'tcx> {
}
// Create a statement which reads the discriminant into a temporary
#[tracing::instrument(level = "trace", skip(self, body))]
fn get_discr(&self, body: &mut Body<'tcx>) -> (Statement<'tcx>, Place<'tcx>) {
let temp_decl = LocalDecl::new(self.discr_ty, body.span);
let local_decls_len = body.local_decls.push(temp_decl);
@ -382,6 +391,20 @@ impl<'tcx> TransformVisitor<'tcx> {
);
(assign, temp)
}
/// Swaps all references of `old_local` and `new_local`.
#[tracing::instrument(level = "trace", skip(self, body))]
fn replace_local(&mut self, old_local: Local, new_local: Local, body: &mut Body<'tcx>) {
body.local_decls.swap(old_local, new_local);
let mut visitor = RenameLocalVisitor { from: old_local, to: new_local, tcx: self.tcx };
visitor.visit_body(body);
for suspension in &mut self.suspension_points {
let ctxt = PlaceContext::MutatingUse(MutatingUseContext::Yield);
let location = Location { block: START_BLOCK, statement_index: 0 };
visitor.visit_place(&mut suspension.resume_arg, ctxt, location);
}
}
}
impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
@ -389,48 +412,62 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
self.tcx
}
fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
#[tracing::instrument(level = "trace", skip(self), ret)]
fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _location: Location) {
assert!(!self.remap.contains(*local));
}
fn visit_place(
&mut self,
place: &mut Place<'tcx>,
_context: PlaceContext,
_location: Location,
) {
#[tracing::instrument(level = "trace", skip(self), ret)]
fn visit_place(&mut self, place: &mut Place<'tcx>, _: PlaceContext, _location: Location) {
// Replace an Local in the remap with a coroutine struct access
if let Some(&Some((ty, variant_index, idx))) = self.remap.get(place.local) {
replace_base(place, self.make_field(variant_index, idx, ty), self.tcx);
}
}
fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
#[tracing::instrument(level = "trace", skip(self, stmt), ret)]
fn visit_statement(&mut self, stmt: &mut Statement<'tcx>, location: Location) {
// Remove StorageLive and StorageDead statements for remapped locals
for s in &mut data.statements {
if let StatementKind::StorageLive(l) | StatementKind::StorageDead(l) = s.kind
&& self.remap.contains(l)
{
s.make_nop(true);
}
if let StatementKind::StorageLive(l) | StatementKind::StorageDead(l) = stmt.kind
&& self.remap.contains(l)
{
stmt.make_nop(true);
}
self.super_statement(stmt, location);
}
let ret_val = match data.terminator().kind {
#[tracing::instrument(level = "trace", skip(self, term), ret)]
fn visit_terminator(&mut self, term: &mut Terminator<'tcx>, location: Location) {
if let TerminatorKind::Return = term.kind {
// `visit_basic_block_data` introduces `Return` terminators which read `RETURN_PLACE`.
// But this `RETURN_PLACE` is already remapped, so we should not touch it again.
return;
}
self.super_terminator(term, location);
}
#[tracing::instrument(level = "trace", skip(self, data), ret)]
fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
match data.terminator().kind {
TerminatorKind::Return => {
Some((true, None, Operand::Move(Place::from(self.old_ret_local)), None))
let source_info = data.terminator().source_info;
// We must assign the value first in case it gets declared dead below
self.make_state(
Operand::Move(Place::return_place()),
source_info,
true,
&mut data.statements,
);
// Return state.
let state = VariantIdx::new(CoroutineArgs::RETURNED);
data.statements.push(self.set_discr(state, source_info));
data.terminator_mut().kind = TerminatorKind::Return;
}
TerminatorKind::Yield { ref value, resume, resume_arg, drop } => {
Some((false, Some((resume, resume_arg)), value.clone(), drop))
}
_ => None,
};
if let Some((is_return, resume, v, drop)) = ret_val {
let source_info = data.terminator().source_info;
// We must assign the value first in case it gets declared dead below
self.make_state(v, source_info, is_return, &mut data.statements);
let state = if let Some((resume, mut resume_arg)) = resume {
// Yield
TerminatorKind::Yield { ref value, resume, mut resume_arg, drop } => {
let source_info = data.terminator().source_info;
// We must assign the value first in case it gets declared dead below
self.make_state(value.clone(), source_info, false, &mut data.statements);
// Yield state.
let state = CoroutineArgs::RESERVED_VARIANTS + self.suspension_points.len();
// The resume arg target location might itself be remapped if its base local is
@ -461,13 +498,11 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
storage_liveness,
});
VariantIdx::new(state)
} else {
// Return
VariantIdx::new(CoroutineArgs::RETURNED) // state for returned
};
data.statements.push(self.set_discr(state, source_info));
data.terminator_mut().kind = TerminatorKind::Return;
let state = VariantIdx::new(state);
data.statements.push(self.set_discr(state, source_info));
data.terminator_mut().kind = TerminatorKind::Return;
}
_ => {}
}
self.super_basic_block_data(block, data);
@ -483,6 +518,7 @@ fn make_aggregate_adt<'tcx>(
Rvalue::Aggregate(Box::new(AggregateKind::Adt(def_id, variant_idx, args, None, None)), operands)
}
#[tracing::instrument(level = "trace", skip(tcx, body))]
fn make_coroutine_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let coroutine_ty = body.local_decls.raw[1].ty;
@ -495,6 +531,7 @@ fn make_coroutine_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Bo
SelfArgVisitor::new(tcx, ProjectionElem::Deref).visit_body(body);
}
#[tracing::instrument(level = "trace", skip(tcx, body))]
fn make_coroutine_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let ref_coroutine_ty = body.local_decls.raw[1].ty;
@ -511,27 +548,6 @@ fn make_coroutine_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body
.visit_body(body);
}
/// Allocates a new local and replaces all references of `local` with it. Returns the new local.
///
/// `local` will be changed to a new local decl with type `ty`.
///
/// Note that the new local will be uninitialized. It is the caller's responsibility to assign some
/// valid value to it before its first use.
fn replace_local<'tcx>(
local: Local,
ty: Ty<'tcx>,
body: &mut Body<'tcx>,
tcx: TyCtxt<'tcx>,
) -> Local {
let new_decl = LocalDecl::new(ty, body.span);
let new_local = body.local_decls.push(new_decl);
body.local_decls.swap(local, new_local);
RenameLocalVisitor { from: local, to: new_local, tcx }.visit_body(body);
new_local
}
/// Transforms the `body` of the coroutine applying the following transforms:
///
/// - Eliminates all the `get_context` calls that async lowering created.
@ -553,6 +569,7 @@ fn replace_local<'tcx>(
/// The async lowering step and the type / lifetime inference / checking are
/// still using the `ResumeTy` indirection for the time being, and that indirection
/// is removed here. After this transform, the coroutine body only knows about `&mut Context<'_>`.
#[tracing::instrument(level = "trace", skip(tcx, body), ret)]
fn transform_async_context<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> Ty<'tcx> {
let context_mut_ref = Ty::new_task_context(tcx);
@ -606,6 +623,7 @@ fn eliminate_get_context_call<'tcx>(bb_data: &mut BasicBlockData<'tcx>) -> Local
}
#[cfg_attr(not(debug_assertions), allow(unused))]
#[tracing::instrument(level = "trace", skip(tcx, body), ret)]
fn replace_resume_ty_local<'tcx>(
tcx: TyCtxt<'tcx>,
body: &mut Body<'tcx>,
@ -670,6 +688,7 @@ struct LivenessInfo {
/// case none exist, the local is considered to be always live.
/// - a local has to be stored if it is either directly used after the
/// suspend point, or if it is live and has been previously borrowed.
#[tracing::instrument(level = "trace", skip(tcx, body))]
fn locals_live_across_suspend_points<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
@ -945,6 +964,7 @@ impl StorageConflictVisitor<'_, '_> {
}
}
#[tracing::instrument(level = "trace", skip(liveness, body))]
fn compute_layout<'tcx>(
liveness: LivenessInfo,
body: &Body<'tcx>,
@ -1049,7 +1069,9 @@ fn compute_layout<'tcx>(
variant_source_info,
storage_conflicts,
};
debug!(?remap);
debug!(?layout);
debug!(?storage_liveness);
(remap, layout, storage_liveness)
}
@ -1221,6 +1243,7 @@ fn generate_poison_block_and_redirect_unwinds_there<'tcx>(
}
}
#[tracing::instrument(level = "trace", skip(tcx, transform, body))]
fn create_coroutine_resume_function<'tcx>(
tcx: TyCtxt<'tcx>,
transform: TransformVisitor<'tcx>,
@ -1299,7 +1322,7 @@ fn create_coroutine_resume_function<'tcx>(
}
/// An operation that can be performed on a coroutine.
#[derive(PartialEq, Copy, Clone)]
#[derive(PartialEq, Copy, Clone, Debug)]
enum Operation {
Resume,
Drop,
@ -1314,6 +1337,7 @@ impl Operation {
}
}
#[tracing::instrument(level = "trace", skip(transform, body))]
fn create_cases<'tcx>(
body: &mut Body<'tcx>,
transform: &TransformVisitor<'tcx>,
@ -1445,6 +1469,8 @@ impl<'tcx> crate::MirPass<'tcx> for StateTransform {
// This only applies to coroutines
return;
};
tracing::trace!(def_id = ?body.source.def_id());
let old_ret_ty = body.return_ty();
assert!(body.coroutine_drop().is_none() && body.coroutine_drop_async().is_none());
@ -1491,10 +1517,6 @@ impl<'tcx> crate::MirPass<'tcx> for StateTransform {
}
};
// We rename RETURN_PLACE which has type mir.return_ty to old_ret_local
// RETURN_PLACE then is a fresh unused local with type ret_ty.
let old_ret_local = replace_local(RETURN_PLACE, new_ret_ty, body, tcx);
// We need to insert clean drop for unresumed state and perform drop elaboration
// (finally in open_drop_for_tuple) before async drop expansion.
// Async drops, produced by this drop elaboration, will be expanded,
@ -1541,6 +1563,11 @@ impl<'tcx> crate::MirPass<'tcx> for StateTransform {
let can_return = can_return(tcx, body, body.typing_env(tcx));
// We rename RETURN_PLACE which has type mir.return_ty to new_ret_local
// RETURN_PLACE then is a fresh unused local with type ret_ty.
let new_ret_local = body.local_decls.push(LocalDecl::new(new_ret_ty, body.span));
tracing::trace!(?new_ret_local);
// Run the transformation which converts Places from Local to coroutine struct
// accesses for locals in `remap`.
// It also rewrites `return x` and `yield y` as writing a new coroutine state and returning
@ -1553,13 +1580,16 @@ impl<'tcx> crate::MirPass<'tcx> for StateTransform {
storage_liveness,
always_live_locals,
suspension_points: Vec::new(),
old_ret_local,
discr_ty,
new_ret_local,
old_ret_ty,
old_yield_ty,
};
transform.visit_body(body);
// Swap the actual `RETURN_PLACE` and the provisional `new_ret_local`.
transform.replace_local(RETURN_PLACE, new_ret_local, body);
// MIR parameters are not explicitly assigned-to when entering the MIR body.
// If we want to save their values inside the coroutine state, we need to do so explicitly.
let source_info = SourceInfo::outermost(body.span);

View file

@ -126,6 +126,7 @@ fn build_pin_fut<'tcx>(
// Ready() => ready_block
// Pending => yield_block
//}
#[tracing::instrument(level = "trace", skip(tcx, body), ret)]
fn build_poll_switch<'tcx>(
tcx: TyCtxt<'tcx>,
body: &mut Body<'tcx>,
@ -179,6 +180,7 @@ fn build_poll_switch<'tcx>(
}
// Gather blocks, reachable through 'drop' targets of Yield and Drop terminators (chained)
#[tracing::instrument(level = "trace", skip(body), ret)]
fn gather_dropline_blocks<'tcx>(body: &mut Body<'tcx>) -> DenseBitSet<BasicBlock> {
let mut dropline: DenseBitSet<BasicBlock> = DenseBitSet::new_empty(body.basic_blocks.len());
for (bb, data) in traversal::reverse_postorder(body) {
@ -249,6 +251,7 @@ pub(super) fn has_expandable_async_drops<'tcx>(
}
/// Expand Drop terminator for async drops into mainline poll-switch and dropline poll-switch
#[tracing::instrument(level = "trace", skip(tcx, body), ret)]
pub(super) fn expand_async_drops<'tcx>(
tcx: TyCtxt<'tcx>,
body: &mut Body<'tcx>,
@ -259,6 +262,7 @@ pub(super) fn expand_async_drops<'tcx>(
let dropline = gather_dropline_blocks(body);
// Clean drop and async_fut fields if potentially async drop is not expanded (stays sync)
let remove_asyncness = |block: &mut BasicBlockData<'tcx>| {
tracing::trace!("remove_asyncness");
if let TerminatorKind::Drop {
place: _,
target: _,
@ -461,6 +465,7 @@ pub(super) fn expand_async_drops<'tcx>(
}
}
#[tracing::instrument(level = "trace", skip(tcx, body))]
pub(super) fn elaborate_coroutine_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
use crate::elaborate_drop::{Unwind, elaborate_drop};
use crate::patch::MirPatch;
@ -519,6 +524,7 @@ pub(super) fn elaborate_coroutine_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body
elaborator.patch.apply(body);
}
#[tracing::instrument(level = "trace", skip(tcx, body), ret)]
pub(super) fn insert_clean_drop<'tcx>(
tcx: TyCtxt<'tcx>,
body: &mut Body<'tcx>,
@ -550,6 +556,7 @@ pub(super) fn insert_clean_drop<'tcx>(
.push(BasicBlockData::new(Some(Terminator { source_info, kind: term }), false))
}
#[tracing::instrument(level = "trace", skip(tcx, transform, body))]
pub(super) fn create_coroutine_drop_shim<'tcx>(
tcx: TyCtxt<'tcx>,
transform: &TransformVisitor<'tcx>,
@ -621,6 +628,7 @@ pub(super) fn create_coroutine_drop_shim<'tcx>(
}
// Create async drop shim function to drop coroutine itself
#[tracing::instrument(level = "trace", skip(tcx, transform, body))]
pub(super) fn create_coroutine_drop_shim_async<'tcx>(
tcx: TyCtxt<'tcx>,
transform: &TransformVisitor<'tcx>,

View file

@ -296,6 +296,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
[sym::thread_local, ..] => self.check_thread_local(attr, span, target),
[sym::doc, ..] => self.check_doc_attrs(
attr,
attr.span(),
attr_item.style,
hir_id,
target,
@ -1089,18 +1090,18 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
/// Checks that an attribute is used at the crate level. Returns `true` if valid.
fn check_attr_crate_level(
&self,
attr: &Attribute,
attr_span: Span,
style: AttrStyle,
meta: &MetaItemInner,
hir_id: HirId,
) -> bool {
if hir_id != CRATE_HIR_ID {
// insert a bang between `#` and `[...`
let bang_span = attr.span().lo() + BytePos(1);
let bang_span = attr_span.lo() + BytePos(1);
let sugg = (style == AttrStyle::Outer
&& self.tcx.hir_get_parent_item(hir_id) == CRATE_OWNER_ID)
.then_some(errors::AttrCrateLevelOnlySugg {
attr: attr.span().with_lo(bang_span).with_hi(bang_span),
attr: attr_span.with_lo(bang_span).with_hi(bang_span),
});
self.tcx.emit_node_span_lint(
INVALID_DOC_ATTRIBUTES,
@ -1116,7 +1117,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
/// Checks that `doc(test(...))` attribute contains only valid attributes and are at the right place.
fn check_test_attr(
&self,
attr: &Attribute,
attr_span: Span,
style: AttrStyle,
meta: &MetaItemInner,
hir_id: HirId,
@ -1128,7 +1129,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
// Allowed everywhere like `#[doc]`
}
(Some(sym::no_crate_inject), _) => {
self.check_attr_crate_level(attr, style, meta, hir_id);
self.check_attr_crate_level(attr_span, style, meta, hir_id);
}
(_, Some(m)) => {
self.tcx.emit_node_span_lint(
@ -1225,6 +1226,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
fn check_doc_attrs(
&self,
attr: &Attribute,
attr_span: Span,
style: AttrStyle,
hir_id: HirId,
target: Target,
@ -1274,7 +1276,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
}
Some(sym::test) => {
self.check_test_attr(attr, style, meta, hir_id);
self.check_test_attr(attr_span, style, meta, hir_id);
}
Some(
@ -1285,7 +1287,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
| sym::html_root_url
| sym::html_no_source,
) => {
self.check_attr_crate_level(attr, style, meta, hir_id);
self.check_attr_crate_level(attr_span, style, meta, hir_id);
}
Some(sym::auto_cfg) => {
@ -1301,7 +1303,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
Some(sym::cfg | sym::hidden | sym::notable_trait) => {}
Some(sym::rust_logo) => {
if self.check_attr_crate_level(attr, style, meta, hir_id)
if self.check_attr_crate_level(attr_span, style, meta, hir_id)
&& !self.tcx.features().rustdoc_internals()
{
feature_err(

View file

@ -231,6 +231,9 @@ jobs:
exit ${exitcode}
fi
# Store merge commit message
git log -1 --pretty=%B > message.txt
# Format changes
./miri toolchain
./miri fmt --check || (./miri fmt && git commit -am "fmt")
@ -239,7 +242,7 @@ jobs:
BRANCH="rustup-$(date -u +%Y-%m-%d)"
git switch -c $BRANCH
git push -u origin $BRANCH
gh pr create -B master --title 'Automatic Rustup' --body "Update \`rustc\` to https://github.com/rust-lang/rust/commit/$(cat rust-version)."
gh pr create -B master --title 'Automatic Rustup' --body-file message.txt
env:
GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}

View file

@ -246,6 +246,21 @@ such races.
Note: `cargo-nextest` does not support doctests, see https://github.com/nextest-rs/nextest/issues/16
### Directly invoking the `miri` driver
The recommended way to invoke Miri is via `cargo miri`. Directly invoking the underlying `miri`
driver is not supported, which is why that binary is not even installed into the PATH. However, if
you need to run Miri on many small tests and want to invoke it directly like you would invoke
`rustc`, that is still possible with a bit of extra effort:
```sh
# one-time setup
cargo +nightly miri setup
SYSROOT=$(cargo +nightly miri setup --print-sysroot)
# per file
~/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/miri --sysroot "$SYSROOT" file.rs
```
### Common Problems
When using the above instructions, you may encounter a number of confusing compiler

View file

@ -1,15 +1,26 @@
# **(WIP)** Documentation for Miri-GenMC
**NOTE: GenMC mode is not yet fully implemented, and has [several correctness issues](https://github.com/rust-lang/miri/issues/4572). Using GenMC mode currently requires manually compiling Miri, see [Usage](#usage).**
[GenMC](https://github.com/MPI-SWS/genmc) is a stateless model checker for exploring concurrent executions of a program.
Miri-GenMC integrates that model checker into Miri.
**NOTE: Currently, no actual GenMC functionality is part of Miri, this is still WIP.**
Miri in GenMC mode takes a program as input like regular Miri, but instead of running it once, the program is executed repeatedly, until all possible executions allowed by the Rust memory model are explored.
This includes all possible thread interleavings and all allowed return values for atomic operations, including cases that are very rare to encounter on actual hardware.
(However, this does not include other sources of non-determinism, such as the absolute addresses of allocations.
It is hence still possible for a test case to contain latent bugs even if it passes GenMC verification.)
<!-- FIXME(genmc): add explanation. -->
GenMC requires the input program to be bounded, i.e., have finitely many possible executions, otherwise it will not terminate.
Any loops that may run infinitely must be replaced or bounded (see below).
GenMC makes use of Dynamic Partial Order Reduction (DPOR) to reduce the number of executions that must be explored, but the runtime can still be super-exponential in the size of the input program (number of threads and amount of interaction between threads).
Large programs may not be verifiable in a reasonable amount of time.
## Usage
For testing/developing Miri-GenMC:
- install all [dependencies required by GenMC](https://github.com/MPI-SWS/genmc?tab=readme-ov-file#dependencies)
- clone the Miri repo.
- build Miri-GenMC with `./miri build --features=genmc`.
- OR: install Miri-GenMC in the current system with `./miri install --features=genmc`
@ -50,6 +61,66 @@ Note that `cargo miri test` in GenMC mode is currently not supported.
<!-- FIXME(genmc): add tips for using Miri-GenMC more efficiently. -->
### Eliminating unbounded loops
As mentioned above, GenMC requires all loops to be bounded.
Otherwise, it is not possible to exhaustively explore all executions.
Currently, Miri-GenMC has no support for automatically bounding loops, so this needs to be done manually.
#### Bounding loops without side effects
The easiest case is that of a loop that simply spins until it observes a certain condition, without any side effects.
Such loops can be limited to one iteration, as demonstrated by the following example:
```rust
#[cfg(miri)]
unsafe extern "Rust" {
// This is a special function that Miri provides.
// It blocks the thread calling this function if the condition is false.
pub unsafe fn miri_genmc_assume(condition: bool);
}
// This function loads an atomic boolean in a loop until it is true.
// GenMC will explore all executions where this does 1, 2, ..., ∞ loads, which means the verification will never terminate.
fn spin_until_true(flag: &AtomicBool) {
while !flag.load(Relaxed) {
std::hint::spin_loop();
}
}
// By replacing this loop with an assume statement, the only executions that will be explored are those with exactly 1 load that observes the expected value.
// Incorrect use of assume statements can lead GenMC to miss important executions, so it is marked `unsafe`.
fn spin_until_true_genmc(flag: &AtomicBool) {
unsafe { miri_genmc_assume(flag.load(Relaxed)) };
}
```
#### Bounding loops with side effects
Some loops do contain side effects, meaning the number of explored iterations affects the rest of the program.
Replacing the loop with one iteration like we did above would mean we miss all those possible executions.
In such a case, the loop can be limited to a fixed number of iterations instead.
The choice of iteration limit trades off verification time for possibly missing bugs requiring more iterations.
```rust
/// The loop in this function has a side effect, which is to increment the counter for the number of iterations.
/// Instead of replacing the loop entirely (which would miss all executions with `count > 0`), we limit the loop to at most 3 iterations.
fn count_until_true_genmc(flag: &AtomicBool) -> u64 {
let mut count = 0;
while !flag.load(Relaxed) {
count += 1;
std::hint::spin_loop();
// Any execution that takes more than 3 iterations will not be explored.
unsafe { miri_genmc_assume(count <= 3) };
}
count
}
```
<!-- FIXME: update the code above once Miri supports a loop bounding features like GenMC's `--unroll=N`. -->
<!-- FIXME: update this section once Miri-GenMC supports automatic program transformations (like spinloop-assume replacement). -->
## Limitations
Some or all of these limitations might get removed in the future:

View file

@ -28,7 +28,7 @@ mod downloading {
/// The GenMC repository that we get our commit from.
pub(crate) const GENMC_GITHUB_URL: &str = "https://gitlab.inf.ethz.ch/public-plf/genmc.git";
/// The GenMC commit we depend on. It must be available on the specified GenMC repository.
pub(crate) const GENMC_COMMIT: &str = "af9cc9ccd5d412b16defc35dbf36571c63a19c76";
pub(crate) const GENMC_COMMIT: &str = "ce775ccd7866db820fa12ffca66463087a11dd96";
/// Ensure that a local GenMC repo is present and set to the correct commit.
/// Return the path of the GenMC repo and whether the checked out commit was changed.
@ -227,12 +227,17 @@ fn compile_cpp_dependencies(genmc_path: &Path, always_configure: bool) {
// These definitions are parsed into a cmake list and then printed to the config.h file, so they are ';' separated.
let definitions = llvm_definitions.split(";");
// These are all the C++ files we need to compile, which needs to be updated if more C++ files are added to Miri.
// We use absolute paths since relative paths can confuse IDEs when attempting to go-to-source on a path in a compiler error.
let cpp_files_base_path = Path::new("cpp/src/");
let cpp_files = [
"./cpp/src/MiriInterface/EventHandling.cpp",
"./cpp/src/MiriInterface/Exploration.cpp",
"./cpp/src/MiriInterface/Setup.cpp",
"./cpp/src/MiriInterface/ThreadManagement.cpp",
];
"MiriInterface/EventHandling.cpp",
"MiriInterface/Exploration.cpp",
"MiriInterface/Mutex.cpp",
"MiriInterface/Setup.cpp",
"MiriInterface/ThreadManagement.cpp",
]
.map(|file| std::path::absolute(cpp_files_base_path.join(file)).unwrap());
let mut bridge = cxx_build::bridge("src/lib.rs");
// FIXME(genmc,cmake): Remove once the GenMC debug setting is available in the config.h file.

View file

@ -12,7 +12,6 @@
// GenMC headers:
#include "ExecutionGraph/EventLabel.hpp"
#include "Static/ModuleID.hpp"
#include "Support/MemOrdering.hpp"
#include "Support/RMWOps.hpp"
#include "Verification/Config.hpp"
@ -36,6 +35,7 @@ struct LoadResult;
struct StoreResult;
struct ReadModifyWriteResult;
struct CompareExchangeResult;
struct MutexLockResult;
// GenMC uses `int` for its thread IDs.
using ThreadId = int;
@ -126,7 +126,7 @@ struct MiriGenmcShim : private GenMCDriver {
/**** Memory (de)allocation ****/
auto handle_malloc(ThreadId thread_id, uint64_t size, uint64_t alignment) -> uint64_t;
void handle_free(ThreadId thread_id, uint64_t address);
auto handle_free(ThreadId thread_id, uint64_t address) -> bool;
/**** Thread management ****/
void handle_thread_create(ThreadId thread_id, ThreadId parent_id);
@ -134,6 +134,16 @@ struct MiriGenmcShim : private GenMCDriver {
void handle_thread_finish(ThreadId thread_id, uint64_t ret_val);
void handle_thread_kill(ThreadId thread_id);
/**** Blocking instructions ****/
/// Inform GenMC that the thread should be blocked.
void handle_assume_block(ThreadId thread_id, AssumeType assume_type);
/**** Mutex handling ****/
auto handle_mutex_lock(ThreadId thread_id, uint64_t address, uint64_t size) -> MutexLockResult;
auto handle_mutex_try_lock(ThreadId thread_id, uint64_t address, uint64_t size)
-> MutexLockResult;
auto handle_mutex_unlock(ThreadId thread_id, uint64_t address, uint64_t size) -> StoreResult;
/***** Exploration related functionality *****/
/** Ask the GenMC scheduler for a new thread to schedule and return whether the execution is
@ -207,9 +217,10 @@ struct MiriGenmcShim : private GenMCDriver {
* Automatically calls `inc_pos` and `dec_pos` where needed for the given thread.
*/
template <EventLabel::EventLabelKind k, typename... Ts>
auto handle_load_reset_if_none(ThreadId tid, Ts&&... params) -> HandleResult<SVal> {
auto handle_load_reset_if_none(ThreadId tid, std::optional<SVal> old_val, Ts&&... params)
-> HandleResult<SVal> {
const auto pos = inc_pos(tid);
const auto ret = GenMCDriver::handleLoad<k>(pos, std::forward<Ts>(params)...);
const auto ret = GenMCDriver::handleLoad<k>(pos, old_val, std::forward<Ts>(params)...);
// If we didn't get a value, we have to reset the index of the current thread.
if (!std::holds_alternative<SVal>(ret)) {
dec_pos(tid);
@ -250,6 +261,7 @@ namespace GenmcScalarExt {
inline GenmcScalar uninit() {
return GenmcScalar {
.value = 0,
.extra = 0,
.is_init = false,
};
}
@ -257,13 +269,20 @@ inline GenmcScalar uninit() {
inline GenmcScalar from_sval(SVal sval) {
return GenmcScalar {
.value = sval.get(),
.extra = sval.getExtra(),
.is_init = true,
};
}
inline SVal to_sval(GenmcScalar scalar) {
ERROR_ON(!scalar.is_init, "Cannot convert an uninitialized `GenmcScalar` into an `SVal`\n");
return SVal(scalar.value);
return SVal(scalar.value, scalar.extra);
}
inline std::optional<SVal> try_to_sval(GenmcScalar scalar) {
if (scalar.is_init)
return { SVal(scalar.value, scalar.extra) };
return std::nullopt;
}
} // namespace GenmcScalarExt
@ -342,4 +361,22 @@ inline CompareExchangeResult from_error(std::unique_ptr<std::string> error) {
}
} // namespace CompareExchangeResultExt
namespace MutexLockResultExt {
inline MutexLockResult ok(bool is_lock_acquired) {
return MutexLockResult { /* error: */ nullptr, /* is_reset: */ false, is_lock_acquired };
}
inline MutexLockResult reset() {
return MutexLockResult { /* error: */ nullptr,
/* is_reset: */ true,
/* is_lock_acquired: */ false };
}
inline MutexLockResult from_error(std::unique_ptr<std::string> error) {
return MutexLockResult { /* error: */ std::move(error),
/* is_reset: */ false,
/* is_lock_acquired: */ false };
}
} // namespace MutexLockResultExt
#endif /* GENMC_MIRI_INTERFACE_HPP */

View file

@ -30,6 +30,13 @@
#include <memory>
#include <utility>
/**** Blocking instructions ****/
/// Inform GenMC that the given thread should be blocked.
///
/// `assume_type` tells GenMC what kind of assume caused the block (e.g. a user-supplied
/// assume statement, or a spinloop annotation as used by the mutex handlers).
void MiriGenmcShim::handle_assume_block(ThreadId thread_id, AssumeType assume_type) {
    // A thread that is already blocked must not emit further events; hitting this is a
    // driver-side logic error, not a user-program error.
    BUG_ON(getExec().getGraph().isThreadBlocked(thread_id));
    // `inc_pos` advances this thread's event position; the assume event occupies that slot.
    GenMCDriver::handleAssume(inc_pos(thread_id), assume_type);
}
/**** Memory access handling ****/
[[nodiscard]] auto MiriGenmcShim::handle_load(
@ -43,6 +50,7 @@
const auto type = AType::Unsigned;
const auto ret = handle_load_reset_if_none<EventLabel::EventLabelKind::Read>(
thread_id,
GenmcScalarExt::try_to_sval(old_val),
ord,
SAddr(address),
ASize(size),
@ -52,6 +60,7 @@
if (const auto* err = std::get_if<VerificationError>(&ret))
return LoadResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
if (ret_val == nullptr)
ERROR("Unimplemented: load returned unexpected result.");
return LoadResultExt::from_value(*ret_val);
@ -68,6 +77,7 @@
const auto pos = inc_pos(thread_id);
const auto ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::Write>(
pos,
GenmcScalarExt::try_to_sval(old_val),
ord,
SAddr(address),
ASize(size),
@ -78,15 +88,14 @@
if (const auto* err = std::get_if<VerificationError>(&ret))
return StoreResultExt::from_error(format_error(*err));
if (!std::holds_alternative<std::monostate>(ret))
ERROR("store returned unexpected result");
// FIXME(genmc,mixed-accesses): Use the value that GenMC returns from handleStore (once
// available).
const auto& g = getExec().getGraph();
return StoreResultExt::ok(
/* is_coherence_order_maximal_write */ g.co_max(SAddr(address))->getPos() == pos
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(
nullptr == is_coherence_order_maximal_write,
"Unimplemented: Store returned unexpected result."
);
return StoreResultExt::ok(*is_coherence_order_maximal_write);
}
void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
@ -111,6 +120,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
// `FaiRead` and `FaiWrite`.
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::FaiRead>(
thread_id,
GenmcScalarExt::try_to_sval(old_val),
ordering,
SAddr(address),
ASize(size),
@ -123,6 +133,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
return ReadModifyWriteResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&load_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
if (nullptr == ret_val) {
ERROR("Unimplemented: read-modify-write returned unexpected result.");
}
@ -133,6 +144,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
const auto storePos = inc_pos(thread_id);
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::FaiWrite>(
storePos,
GenmcScalarExt::try_to_sval(old_val),
ordering,
SAddr(address),
ASize(size),
@ -142,16 +154,16 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
if (const auto* err = std::get_if<VerificationError>(&store_ret))
return ReadModifyWriteResultExt::from_error(format_error(*err));
const auto* store_ret_val = std::get_if<std::monostate>(&store_ret);
ERROR_ON(nullptr == store_ret_val, "Unimplemented: RMW store returned unexpected result.");
// FIXME(genmc,mixed-accesses): Use the value that GenMC returns from handleStore (once
// available).
const auto& g = getExec().getGraph();
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(
nullptr == is_coherence_order_maximal_write,
"Unimplemented: RMW store returned unexpected result."
);
return ReadModifyWriteResultExt::ok(
/* old_value: */ read_old_val,
new_value,
/* is_coherence_order_maximal_write */ g.co_max(SAddr(address))->getPos() == storePos
*is_coherence_order_maximal_write
);
}
@ -177,6 +189,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::CasRead>(
thread_id,
GenmcScalarExt::try_to_sval(old_val),
success_ordering,
SAddr(address),
ASize(size),
@ -187,6 +200,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
if (const auto* err = std::get_if<VerificationError>(&load_ret))
return CompareExchangeResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&load_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(nullptr == ret_val, "Unimplemented: load returned unexpected result.");
const auto read_old_val = *ret_val;
if (read_old_val != expectedVal)
@ -197,6 +211,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
const auto storePos = inc_pos(thread_id);
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::CasWrite>(
storePos,
GenmcScalarExt::try_to_sval(old_val),
success_ordering,
SAddr(address),
ASize(size),
@ -205,19 +220,13 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
return CompareExchangeResultExt::from_error(format_error(*err));
const auto* store_ret_val = std::get_if<std::monostate>(&store_ret);
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(
nullptr == store_ret_val,
nullptr == is_coherence_order_maximal_write,
"Unimplemented: compare-exchange store returned unexpected result."
);
// FIXME(genmc,mixed-accesses): Use the value that GenMC returns from handleStore (once
// available).
const auto& g = getExec().getGraph();
return CompareExchangeResultExt::success(
read_old_val,
/* is_coherence_order_maximal_write */ g.co_max(SAddr(address))->getPos() == storePos
);
return CompareExchangeResultExt::success(read_old_val, *is_coherence_order_maximal_write);
}
/**** Memory (de)allocation ****/
@ -244,8 +253,9 @@ auto MiriGenmcShim::handle_malloc(ThreadId thread_id, uint64_t size, uint64_t al
return ret_val.get();
}
void MiriGenmcShim::handle_free(ThreadId thread_id, uint64_t address) {
auto MiriGenmcShim::handle_free(ThreadId thread_id, uint64_t address) -> bool {
const auto pos = inc_pos(thread_id);
GenMCDriver::handleFree(pos, SAddr(address), EventDeps());
// FIXME(genmc): add error handling once GenMC returns errors from `handleFree`
// FIXME(genmc): use returned error from `handleFree` once implemented in GenMC.
return getResult().status.has_value();
}

View file

@ -24,8 +24,10 @@ auto MiriGenmcShim::schedule_next(
if (const auto result = GenMCDriver::scheduleNext(threads_action_))
return SchedulingResult { ExecutionState::Ok, static_cast<int32_t>(result.value()) };
if (GenMCDriver::isExecutionBlocked())
if (getExec().getGraph().isBlocked())
return SchedulingResult { ExecutionState::Blocked, 0 };
if (getResult().status.has_value()) // the "value" here is a `VerificationError`
return SchedulingResult { ExecutionState::Error, 0 };
return SchedulingResult { ExecutionState::Finished, 0 };
}

View file

@ -0,0 +1,159 @@
/** This file contains functionality related to handling mutexes. */
#include "MiriInterface.hpp"
// GenMC headers:
#include "Static/ModuleID.hpp"
// CXX.rs generated headers:
#include "genmc-sys/src/lib.rs.h"
#define MUTEX_UNLOCKED SVal(0)
#define MUTEX_LOCKED SVal(1)
/// Handle a (blocking) mutex lock operation for the given thread.
///
/// The lock is modeled as an annotated `LockCasRead` load followed, if the mutex was observed
/// unlocked, by a `LockCasWrite` store. If the mutex is already locked, the thread is blocked
/// via an assume; if GenMC answers `Reset`, the caller should retry once this thread is
/// scheduled again.
auto MiriGenmcShim::handle_mutex_lock(ThreadId thread_id, uint64_t address, uint64_t size)
    -> MutexLockResult {
    // This annotation informs GenMC about the condition required to make this lock call succeed.
    // It stands for `value_read_by_load != MUTEX_LOCKED`.
    const auto size_bits = size * 8;
    // NOTE: `Annotation(...)` is a prvalue, so wrapping it in `std::move` would be a
    // pessimizing move that only inhibits copy elision; initialize `annot` from it directly.
    const auto annot = Annotation(
        AssumeType::Spinloop,
        Annotation::ExprVP(
            NeExpr<ModuleID::ID>::create(
                // `RegisterExpr` marks the value of the current expression, i.e., the loaded value.
                // The `id` is ignored by GenMC; it is only used by the LLI frontend to substitute
                // other variables from previous expressions that may be used here.
                RegisterExpr<ModuleID::ID>::create(size_bits, /* id */ 0),
                ConcreteExpr<ModuleID::ID>::create(size_bits, MUTEX_LOCKED)
            )
                .release()
        )
    );
    // As usual, we need to tell GenMC which value was stored at this location before this atomic
    // access, if there previously was a non-atomic initializing access. We set the initial state of
    // a mutex to be "unlocked".
    const auto old_val = MUTEX_UNLOCKED;
    const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::LockCasRead>(
        thread_id,
        old_val,
        address,
        size,
        annot,
        EventDeps()
    );
    if (const auto* err = std::get_if<VerificationError>(&load_ret))
        return MutexLockResultExt::from_error(format_error(*err));
    // If we get a `Reset`, GenMC decided that this lock operation should not yet run, since it
    // would not acquire the mutex. Like the handling of the case further down where we read a `1`
    // ("Mutex already locked"), Miri should call the handle function again once the current thread
    // is scheduled by GenMC the next time.
    if (std::holds_alternative<Reset>(load_ret))
        return MutexLockResultExt::reset();
    const auto* ret_val = std::get_if<SVal>(&load_ret);
    ERROR_ON(!ret_val, "Unimplemented: mutex lock returned unexpected result.");
    ERROR_ON(
        *ret_val != MUTEX_UNLOCKED && *ret_val != MUTEX_LOCKED,
        "Mutex read value was neither 0 nor 1"
    );
    const bool is_lock_acquired = *ret_val == MUTEX_UNLOCKED;
    if (is_lock_acquired) {
        const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::LockCasWrite>(
            inc_pos(thread_id),
            old_val,
            address,
            size,
            EventDeps()
        );
        if (const auto* err = std::get_if<VerificationError>(&store_ret))
            return MutexLockResultExt::from_error(format_error(*err));
        // We don't update Miri's memory for this operation so we don't need to know if the store
        // was the co-maximal store, but we still check that we at least get a boolean as the result
        // of the store.
        const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
        // (Fixed copy-paste in the diagnostic: this is the `lock` handler, not `try_lock`.)
        ERROR_ON(
            nullptr == is_coherence_order_maximal_write,
            "Unimplemented: store part of mutex lock returned unexpected result."
        );
    } else {
        // We did not acquire the mutex, so we tell GenMC to block the thread until we can acquire
        // it. GenMC determines this based on the annotation we pass with the load further up in
        // this function, namely when that load will read a value other than `MUTEX_LOCKED`.
        this->handle_assume_block(thread_id, AssumeType::Spinloop);
    }
    return MutexLockResultExt::ok(is_lock_acquired);
}
/// Handle a non-blocking mutex try-lock operation for the given thread.
///
/// Modeled as a `TrylockCasRead` load; only if the mutex was observed unlocked is the
/// acquiring `TrylockCasWrite` store emitted. Unlike `handle_mutex_lock`, this never blocks
/// the thread and never asks for a retry: on contention it simply reports failure.
auto MiriGenmcShim::handle_mutex_try_lock(ThreadId thread_id, uint64_t address, uint64_t size)
    -> MutexLockResult {
    // NOTE(review): this advances the thread's event position directly via `++currPos`
    // (twice on the success path) instead of going through `inc_pos`/`dec_pos` like the
    // other handlers; presumably no reset path is needed for try-lock — confirm.
    auto& currPos = threads_action_[thread_id].event;
    // As usual, we need to tell GenMC which value was stored at this location before this atomic
    // access, if there previously was a non-atomic initializing access. We set the initial state of
    // a mutex to be "unlocked".
    const auto old_val = MUTEX_UNLOCKED;
    const auto load_ret = GenMCDriver::handleLoad<EventLabel::EventLabelKind::TrylockCasRead>(
        ++currPos,
        old_val,
        SAddr(address),
        ASize(size)
    );
    if (const auto* err = std::get_if<VerificationError>(&load_ret))
        return MutexLockResultExt::from_error(format_error(*err));
    const auto* ret_val = std::get_if<SVal>(&load_ret);
    if (nullptr == ret_val) {
        ERROR("Unimplemented: mutex trylock load returned unexpected result.");
    }
    // The mutex cell must only ever hold the unlocked (0) or locked (1) sentinel value.
    ERROR_ON(
        *ret_val != MUTEX_UNLOCKED && *ret_val != MUTEX_LOCKED,
        "Mutex read value was neither 0 nor 1"
    );
    const bool is_lock_acquired = *ret_val == MUTEX_UNLOCKED;
    if (!is_lock_acquired) {
        return MutexLockResultExt::ok(false); /* Lock already held. */
    }
    // The mutex was observed unlocked: emit the store that actually acquires it.
    const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::TrylockCasWrite>(
        ++currPos,
        old_val,
        SAddr(address),
        ASize(size)
    );
    if (const auto* err = std::get_if<VerificationError>(&store_ret))
        return MutexLockResultExt::from_error(format_error(*err));
    // We don't update Miri's memory for this operation so we don't need to know if the store was
    // co-maximal, but we still check that we get a boolean result.
    const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
    ERROR_ON(
        nullptr == is_coherence_order_maximal_write,
        "Unimplemented: store part of mutex try_lock returned unexpected result."
    );
    return MutexLockResultExt::ok(true);
}
/// Handle a mutex unlock operation for the given thread.
///
/// Modeled as a single release-ordered `UnlockWrite` store of the "unlocked" sentinel.
auto MiriGenmcShim::handle_mutex_unlock(ThreadId thread_id, uint64_t address, uint64_t size)
    -> StoreResult {
    const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::UnlockWrite>(
        inc_pos(thread_id),
        // As usual, we need to tell GenMC which value was stored at this location before this
        // atomic access, if there previously was a non-atomic initializing access. We set the
        // initial state of a mutex to be "unlocked".
        /* old_val */ MUTEX_UNLOCKED,
        MemOrdering::Release,
        SAddr(address),
        ASize(size),
        AType::Signed,
        /* store_value */ MUTEX_UNLOCKED,
        EventDeps()
    );
    // A verification error takes precedence over any other result shape.
    if (const auto* err = std::get_if<VerificationError>(&store_ret))
        return StoreResultExt::from_error(format_error(*err));
    // The only other result we currently know how to handle is the co-maximality boolean.
    const bool* co_maximal = std::get_if<bool>(&store_ret);
    ERROR_ON(
        nullptr == co_maximal,
        "Unimplemented: store part of mutex unlock returned unexpected result."
    );
    return StoreResultExt::ok(*co_maximal);
}

View file

@ -170,9 +170,8 @@ static auto to_genmc_verbosity_level(const LogLevel log_level) -> VerbosityLevel
// From a Miri perspective, this API doesn't work very well: most memory starts out
// "uninitialized";
// only statics have an initial value. And their initial value is just a sequence of bytes,
// but GenMC
// expect this to be already split into separate atomic variables. So we return a dummy
// value.
// but GenMC expect this to be already split into separate atomic variables. So we return a
// dummy value.
// This value should never be visible to the interpreted program.
// GenMC does not understand uninitialized memory the same way Miri does, which may cause
// this function to be called. The returned value can be visible to Miri or the user:
@ -183,13 +182,14 @@ static auto to_genmc_verbosity_level(const LogLevel log_level) -> VerbosityLevel
// Currently, atomic loads can see this value, unless initialized by an *atomic* store.
// FIXME(genmc): update this comment once mixed atomic-non-atomic support is added.
//
// FIXME(genmc): implement proper support for uninitialized memory in GenMC. Ideally, the
// initial value getter would return an `optional<SVal>`, since the memory location may be
// uninitialized.
// FIXME(genmc): implement proper support for uninitialized memory in GenMC.
// Ideally, the initial value getter would return an `optional<SVal>`, since the memory
// location may be uninitialized.
.initValGetter = [](const AAccess& a) { return SVal(0xDEAD); },
// Miri serves non-atomic loads from its own memory and these GenMC checks are wrong in
// that case. This should no longer be required with proper mixed-size access support.
.skipUninitLoadChecks = [](MemOrdering ord) { return ord == MemOrdering::NotAtomic; },
// Miri serves non-atomic loads from its own memory and these GenMC checks are wrong in that
// case. This should no longer be required with proper mixed-size access support.
.skipUninitLoadChecks = [](const MemAccessLabel* access_label
) { return access_label->getOrdering() == MemOrdering::NotAtomic; },
};
driver->setInterpCallbacks(std::move(interpreter_callbacks));

View file

@ -38,6 +38,7 @@ void MiriGenmcShim::handle_thread_join(ThreadId thread_id, ThreadId child_id) {
if (!std::holds_alternative<SVal>(ret)) {
dec_pos(thread_id);
}
// FIXME(genmc): handle `HandleResult::{Invalid, Reset, VerificationError}` return values.
// NOTE: Thread return value is ignored, since Miri doesn't need it.
}

View file

@ -45,10 +45,14 @@ pub fn create_genmc_driver_handle(
}
impl GenmcScalar {
pub const UNINIT: Self = Self { value: 0, is_init: false };
pub const UNINIT: Self = Self { value: 0, extra: 0, is_init: false };
pub const fn from_u64(value: u64) -> Self {
Self { value, is_init: true }
Self { value, extra: 0, is_init: true }
}
pub const fn has_provenance(&self) -> bool {
self.extra != 0
}
}
@ -162,10 +166,16 @@ mod ffi {
}
/// This type corresponds to `Option<SVal>` (or `std::optional<SVal>`), where `SVal` is the type that GenMC uses for storing values.
/// CXX doesn't support `std::optional` currently, so we need to use an extra `bool` to define whether this value is initialized or not.
#[derive(Debug, Clone, Copy)]
struct GenmcScalar {
/// The raw byte-level value (discarding provenance, if any) of this scalar.
value: u64,
/// This is zero for integer values. For pointers, this encodes the provenance by
/// storing the base address of the allocation that this pointer belongs to.
/// Operations on `SVal` in GenMC (e.g., `fetch_add`) preserve the `extra` of the left argument (`left.fetch_add(right, ...)`).
extra: u64,
/// Indicates whether this value is initialized. If this is `false`, the other fields do not matter.
/// (Ideally we'd use `std::optional` but CXX does not support that.)
is_init: bool,
}
@ -173,6 +183,7 @@ mod ffi {
#[derive(Debug, Clone, Copy)]
enum ExecutionState {
Ok,
Error,
Blocked,
Finished,
}
@ -243,6 +254,17 @@ mod ffi {
is_coherence_order_maximal_write: bool,
}
#[must_use]
#[derive(Debug)]
struct MutexLockResult {
/// If there was an error, it will be stored in `error`, otherwise it is `None`.
error: UniquePtr<CxxString>,
/// If true, GenMC determined that we should retry the mutex lock operation once the thread attempting to lock is scheduled again.
is_reset: bool,
/// Indicate whether the lock was acquired by this thread.
is_lock_acquired: bool,
}
/**** These are GenMC types that we have to copy-paste here since cxx does not support
"importing" externally defined C++ types. ****/
@ -258,9 +280,11 @@ mod ffi {
/// Corresponds to GenMC's type with the same name.
/// Should only be modified if changed by GenMC.
enum ActionKind {
/// Any Mir terminator that's atomic and has load semantics.
/// Any MIR terminator that's atomic and that may have load semantics.
/// This includes functions with atomic properties, such as `pthread_create`.
/// If the exact type of the terminator cannot be determined, `Load` is a safe default.
Load,
/// Anything that's not a `Load`.
/// Anything that's definitely not a `Load`.
NonLoad,
}
@ -292,6 +316,13 @@ mod ffi {
UMin = 10,
}
#[derive(Debug)]
enum AssumeType {
User = 0,
Barrier = 1,
Spinloop = 2,
}
// # Safety
//
// This block is unsafe to allow defining safe methods inside.
@ -310,6 +341,7 @@ mod ffi {
(This tells cxx that the enums defined above are already defined on the C++ side;
it will emit assertions to ensure that the two definitions agree.) ****/
type ActionKind;
type AssumeType;
type MemOrdering;
type RMWBinOp;
type SchedulePolicy;
@ -404,7 +436,8 @@ mod ffi {
size: u64,
alignment: u64,
) -> u64;
fn handle_free(self: Pin<&mut MiriGenmcShim>, thread_id: i32, address: u64);
/// Returns true if an error was found.
fn handle_free(self: Pin<&mut MiriGenmcShim>, thread_id: i32, address: u64) -> bool;
/**** Thread management ****/
fn handle_thread_create(self: Pin<&mut MiriGenmcShim>, thread_id: i32, parent_id: i32);
@ -412,6 +445,36 @@ mod ffi {
fn handle_thread_finish(self: Pin<&mut MiriGenmcShim>, thread_id: i32, ret_val: u64);
fn handle_thread_kill(self: Pin<&mut MiriGenmcShim>, thread_id: i32);
/**** Blocking instructions ****/
/// Inform GenMC that the thread should be blocked.
/// The `assume_type` parameter indicates which kind of assume statement this is
/// (e.g. `AssumeType::User` for user-supplied assume statements).
fn handle_assume_block(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
assume_type: AssumeType,
);
/**** Mutex handling ****/
fn handle_mutex_lock(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
address: u64,
size: u64,
) -> MutexLockResult;
fn handle_mutex_try_lock(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
address: u64,
size: u64,
) -> MutexLockResult;
fn handle_mutex_unlock(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
address: u64,
size: u64,
) -> StoreResult;
/***** Exploration related functionality *****/
/// Ask the GenMC scheduler for a new thread to schedule and

View file

@ -1 +1 @@
f6092f224d2b1774b31033f12d0bee626943b02f
36e4f5d1fe1d63953a5bf1758ce2b64172623e2e

View file

@ -13,6 +13,7 @@ use rustc_middle::ty::TyCtxt;
pub use self::address_generator::AddressGenerator;
use self::reuse_pool::ReusePool;
use crate::concurrency::VClock;
use crate::diagnostics::SpanDedupDiagnostic;
use crate::*;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
@ -42,20 +43,19 @@ pub struct GlobalStateInner {
/// they do not have an `AllocExtra`.
/// This is the inverse of `int_to_ptr_map`.
base_addr: FxHashMap<AllocId, u64>,
/// Temporarily store prepared memory space for global allocations the first time their memory
/// address is required. This is used to ensure that the memory is allocated before Miri assigns
/// it an internal address, which is important for matching the internal address to the machine
/// address so FFI can read from pointers.
prepared_alloc_bytes: FxHashMap<AllocId, MiriAllocBytes>,
/// A pool of addresses we can reuse for future allocations.
reuse: ReusePool,
/// Whether an allocation has been exposed or not. This cannot be put
/// The set of exposed allocations. This cannot be put
/// into `AllocExtra` for the same reason as `base_addr`.
exposed: FxHashSet<AllocId>,
/// The generator for new addresses in a given range.
address_generator: AddressGenerator,
/// The provenance to use for int2ptr casts
provenance_mode: ProvenanceMode,
/// The generator for new addresses in a given range, and a pool for address reuse. This is
/// `None` if addresses are generated elsewhere (in native-lib mode or with GenMC).
address_generation: Option<(AddressGenerator, ReusePool)>,
/// Native-lib mode only: Temporarily store prepared memory space for global allocations the
/// first time their memory address is required. This is used to ensure that the memory is
/// allocated before Miri assigns it an internal address, which is important for matching the
/// internal address to the machine address so FFI can read from pointers.
prepared_alloc_bytes: Option<FxHashMap<AllocId, MiriAllocBytes>>,
}
impl VisitProvenance for GlobalStateInner {
@ -64,9 +64,8 @@ impl VisitProvenance for GlobalStateInner {
int_to_ptr_map: _,
base_addr: _,
prepared_alloc_bytes: _,
reuse: _,
exposed: _,
address_generator: _,
address_generation: _,
provenance_mode: _,
} = self;
// Though base_addr, int_to_ptr_map, and exposed contain AllocIds, we do not want to visit them.
@ -83,11 +82,16 @@ impl GlobalStateInner {
GlobalStateInner {
int_to_ptr_map: Vec::default(),
base_addr: FxHashMap::default(),
prepared_alloc_bytes: FxHashMap::default(),
reuse: ReusePool::new(config),
exposed: FxHashSet::default(),
address_generator: AddressGenerator::new(stack_addr..tcx.target_usize_max()),
provenance_mode: config.provenance_mode,
address_generation: (config.native_lib.is_empty() && config.genmc_config.is_none())
.then(|| {
(
AddressGenerator::new(stack_addr..tcx.target_usize_max()),
ReusePool::new(config),
)
}),
prepared_alloc_bytes: (!config.native_lib.is_empty()).then(FxHashMap::default),
}
}
@ -147,6 +151,8 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Store prepared allocation to be picked up for use later.
global_state
.prepared_alloc_bytes
.as_mut()
.unwrap()
.try_insert(alloc_id, prepared_bytes)
.unwrap();
ptr
@ -173,29 +179,25 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// We don't have to expose this pointer yet, we do that in `prepare_for_native_call`.
return interp_ok(base_ptr.addr().to_u64());
}
// We are not in native lib mode, so we control the addresses ourselves.
// We are not in native lib or genmc mode, so we control the addresses ourselves.
let (addr_gen, reuse) = global_state.address_generation.as_mut().unwrap();
let mut rng = this.machine.rng.borrow_mut();
if let Some((reuse_addr, clock)) = global_state.reuse.take_addr(
&mut *rng,
info.size,
info.align,
memory_kind,
this.active_thread(),
) {
if let Some((reuse_addr, clock)) =
reuse.take_addr(&mut *rng, info.size, info.align, memory_kind, this.active_thread())
{
if let Some(clock) = clock {
this.acquire_clock(&clock)?;
}
interp_ok(reuse_addr)
} else {
// We have to pick a fresh address.
let new_addr =
global_state.address_generator.generate(info.size, info.align, &mut rng)?;
let new_addr = addr_gen.generate(info.size, info.align, &mut rng)?;
// If we filled up more than half the address space, start aggressively reusing
// addresses to avoid running out.
let remaining_range = global_state.address_generator.get_remaining();
let remaining_range = addr_gen.get_remaining();
if remaining_range.start > remaining_range.end / 2 {
global_state.reuse.address_space_shortage();
reuse.address_space_shortage();
}
interp_ok(new_addr)
@ -207,13 +209,7 @@ impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Returns the `AllocId` that corresponds to the specified addr,
// or `None` if the addr is out of bounds.
// Setting `only_exposed_allocations` selects whether only exposed allocations are considered.
fn alloc_id_from_addr(
&self,
addr: u64,
size: i64,
only_exposed_allocations: bool,
) -> Option<AllocId> {
fn alloc_id_from_addr(&self, addr: u64, size: i64) -> Option<AllocId> {
let this = self.eval_context_ref();
let global_state = this.machine.alloc_addresses.borrow();
assert!(global_state.provenance_mode != ProvenanceMode::Strict);
@ -242,13 +238,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}?;
// We only use this provenance if it has been exposed, or if the caller requested also non-exposed allocations
if !only_exposed_allocations || global_state.exposed.contains(&alloc_id) {
// We only use this provenance if it has been exposed.
if global_state.exposed.contains(&alloc_id) {
// This must still be live, since we remove allocations from `int_to_ptr_map` when they get freed.
// In GenMC mode, we keep all allocations, so this check doesn't apply there.
if this.machine.data_race.as_genmc_ref().is_none() {
debug_assert!(this.is_alloc_live(alloc_id));
}
debug_assert!(this.is_alloc_live(alloc_id));
Some(alloc_id)
} else {
None
@ -349,12 +342,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
match global_state.provenance_mode {
ProvenanceMode::Default => {
// The first time this happens at a particular location, print a warning.
let mut int2ptr_warned = this.machine.int2ptr_warned.borrow_mut();
let first = int2ptr_warned.is_empty();
if int2ptr_warned.insert(this.cur_span()) {
// Newly inserted, so first time we see this span.
this.emit_diagnostic(NonHaltingDiagnostic::Int2Ptr { details: first });
}
static DEDUP: SpanDedupDiagnostic = SpanDedupDiagnostic::new();
this.dedup_diagnostic(&DEDUP, |first| {
NonHaltingDiagnostic::Int2Ptr { details: first }
});
}
ProvenanceMode::Strict => {
throw_machine_stop!(TerminationInfo::Int2PtrWithStrictProvenance);
@ -414,6 +405,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let mut global_state = this.machine.alloc_addresses.borrow_mut();
let mut prepared_alloc_bytes = global_state
.prepared_alloc_bytes
.as_mut()
.unwrap()
.remove(&id)
.unwrap_or_else(|| panic!("alloc bytes for {id:?} have not been prepared"));
// Sanity-check that the prepared allocation has the right size and alignment.
@ -443,8 +436,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
alloc_id
} else {
// A wildcard pointer.
let only_exposed_allocations = true;
this.alloc_id_from_addr(addr.bytes(), size, only_exposed_allocations)?
this.alloc_id_from_addr(addr.bytes(), size)?
};
// This cannot fail: since we already have a pointer with that provenance, adjust_alloc_root_pointer
@ -465,13 +457,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
impl<'tcx> MiriMachine<'tcx> {
pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) {
// In GenMC mode, we can't remove dead allocation info since such pointers can
// still be stored in atomics and we need this info to convert GenMC pointers to Miri pointers.
// `global_state.reuse` is also unused so we can just skip this entire function.
if self.data_race.as_genmc_ref().is_some() {
return;
}
let global_state = self.alloc_addresses.get_mut();
let rng = self.rng.get_mut();
@ -496,15 +481,17 @@ impl<'tcx> MiriMachine<'tcx> {
// `alloc_id_from_addr` any more.
global_state.exposed.remove(&dead_id);
// Also remember this address for future reuse.
let thread = self.threads.active_thread();
global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
// We already excluded GenMC above. We cannot use `self.release_clock` as
// `self.alloc_addresses` is borrowed.
if let Some(data_race) = self.data_race.as_vclocks_ref() {
data_race.release_clock(&self.threads, |clock| clock.clone())
} else {
VClock::default()
}
})
if let Some((_addr_gen, reuse)) = global_state.address_generation.as_mut() {
let thread = self.threads.active_thread();
reuse.add_addr(rng, addr, size, align, kind, thread, || {
// We cannot be in GenMC mode as then `address_generation` is `None`. We cannot use
// `self.release_clock` as `self.alloc_addresses` is borrowed.
if let Some(data_race) = self.data_race.as_vclocks_ref() {
data_race.release_clock(&self.threads, |clock| clock.clone())
} else {
VClock::default()
}
})
}
}
}

View file

@ -188,6 +188,11 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
// Run in GenMC mode if enabled.
if config.genmc_config.is_some() {
// Validate GenMC settings.
if let Err(err) = GenmcConfig::validate(&mut config, tcx) {
fatal_error!("Invalid settings: {err}");
}
// This is the entry point used in GenMC mode.
// This closure will be called multiple times to explore the concurrent execution space of the program.
let eval_entry_once = |genmc_ctx: Rc<GenmcCtx>| {
@ -352,6 +357,7 @@ fn fatal_error_(msg: &impl std::fmt::Display) -> ! {
macro_rules! fatal_error {
($($tt:tt)*) => { $crate::fatal_error_(&format_args!($($tt)*)) };
}
#[allow(unused)] // use depends on cfg
use fatal_error;
/// Execute a compiler with the given CLI arguments and callbacks.
@ -744,11 +750,6 @@ fn main() {
let many_seeds =
many_seeds.map(|seeds| ManySeedsConfig { seeds, keep_going: many_seeds_keep_going });
// Validate settings for data race detection and GenMC mode.
if let Err(err) = GenmcConfig::validate_genmc_mode_settings(&mut miri_config) {
fatal_error!("Invalid settings: {err}");
}
if miri_config.weak_memory_emulation && !miri_config.data_race_detector {
fatal_error!(
"Weak memory emulation cannot be enabled when the data race detector is disabled"

View file

@ -221,7 +221,7 @@ impl AllocHistory {
pub fn new(id: AllocId, item: Item, machine: &MiriMachine<'_>) -> Self {
Self {
id,
root: (item, machine.current_span()),
root: (item, machine.current_user_relevant_span()),
creations: SmallVec::new(),
invalidations: SmallVec::new(),
protectors: SmallVec::new(),
@ -269,11 +269,11 @@ impl<'history, 'ecx, 'tcx> DiagnosticCx<'history, 'ecx, 'tcx> {
};
self.history
.creations
.push(Creation { retag: op.clone(), span: self.machine.current_span() });
.push(Creation { retag: op.clone(), span: self.machine.current_user_relevant_span() });
}
pub fn log_invalidation(&mut self, tag: BorTag) {
let mut span = self.machine.current_span();
let mut span = self.machine.current_user_relevant_span();
let (range, cause) = match &self.operation {
Operation::Retag(RetagOp { info, range, permission, .. }) => {
if info.cause == RetagCause::FnEntry {
@ -298,7 +298,7 @@ impl<'history, 'ecx, 'tcx> DiagnosticCx<'history, 'ecx, 'tcx> {
};
self.history
.protectors
.push(Protection { tag: op.new_tag, span: self.machine.current_span() });
.push(Protection { tag: op.new_tag, span: self.machine.current_user_relevant_span() });
}
pub fn get_logs_relevant_to(

View file

@ -6,6 +6,7 @@ mod item;
mod stack;
use std::fmt::Write;
use std::sync::atomic::AtomicBool;
use std::{cmp, mem};
use rustc_abi::{BackendRepr, Size};
@ -822,7 +823,8 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
let size = match size {
Some(size) => size,
None => {
if !this.machine.sb_extern_type_warned.replace(true) {
static DEDUP: AtomicBool = AtomicBool::new(false);
if !DEDUP.swap(true, std::sync::atomic::Ordering::Relaxed) {
this.emit_diagnostic(NonHaltingDiagnostic::ExternTypeReborrow);
}
return interp_ok(place.clone());

View file

@ -33,7 +33,7 @@ impl<'tcx> Tree {
machine: &MiriMachine<'tcx>,
) -> Self {
let tag = state.root_ptr_tag(id, machine); // Fresh tag for the root
let span = machine.current_span();
let span = machine.current_user_relevant_span();
Tree::new(tag, size, span)
}
@ -61,7 +61,7 @@ impl<'tcx> Tree {
ProvenanceExtra::Wildcard => return interp_ok(()),
};
let global = machine.borrow_tracker.as_ref().unwrap();
let span = machine.current_span();
let span = machine.current_user_relevant_span();
self.perform_access(
tag,
Some((range, access_kind, diagnostics::AccessCause::Explicit(access_kind))),
@ -86,7 +86,7 @@ impl<'tcx> Tree {
ProvenanceExtra::Wildcard => return interp_ok(()),
};
let global = machine.borrow_tracker.as_ref().unwrap();
let span = machine.current_span();
let span = machine.current_user_relevant_span();
self.dealloc(tag, alloc_range(Size::ZERO, size), global, alloc_id, span)
}
@ -107,7 +107,7 @@ impl<'tcx> Tree {
tag: BorTag,
alloc_id: AllocId, // diagnostics
) -> InterpResult<'tcx> {
let span = machine.current_span();
let span = machine.current_user_relevant_span();
// `None` makes it the magic on-protector-end operation
self.perform_access(tag, None, global, alloc_id, span)
}
@ -360,7 +360,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
Some((range_in_alloc, AccessKind::Read, diagnostics::AccessCause::Reborrow)),
this.machine.borrow_tracker.as_ref().unwrap(),
alloc_id,
this.machine.current_span(),
this.machine.current_user_relevant_span(),
)?;
// Also inform the data race model (but only if any bytes are actually affected).
@ -386,7 +386,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
inside_perms,
new_perm.outside_perm,
protected,
this.machine.current_span(),
this.machine.current_user_relevant_span(),
)?;
drop(tree_borrows);

View file

@ -1208,7 +1208,7 @@ impl VClockAlloc {
ty: Option<Ty<'_>>,
machine: &MiriMachine<'_>,
) -> InterpResult<'tcx> {
let current_span = machine.current_span();
let current_span = machine.current_user_relevant_span();
let global = machine.data_race.as_vclocks_ref().unwrap();
if !global.race_detecting() {
return interp_ok(());
@ -1250,7 +1250,7 @@ impl VClockAlloc {
ty: Option<Ty<'_>>,
machine: &mut MiriMachine<'_>,
) -> InterpResult<'tcx> {
let current_span = machine.current_span();
let current_span = machine.current_user_relevant_span();
let global = machine.data_race.as_vclocks_mut().unwrap();
if !global.race_detecting() {
return interp_ok(());
@ -1304,7 +1304,7 @@ impl Default for LocalClocks {
impl FrameState {
pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
let current_span = machine.current_span();
let current_span = machine.current_user_relevant_span();
let global = machine.data_race.as_vclocks_ref().unwrap();
if !global.race_detecting() {
return;
@ -1334,7 +1334,7 @@ impl FrameState {
}
pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
let current_span = machine.current_span();
let current_span = machine.current_user_relevant_span();
let global = machine.data_race.as_vclocks_ref().unwrap();
if !global.race_detecting() {
return;
@ -1573,7 +1573,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
size.bytes()
);
let current_span = this.machine.current_span();
let current_span = this.machine.current_user_relevant_span();
// Perform the atomic operation.
data_race.maybe_perform_sync_operation(
&this.machine.threads,
@ -1827,7 +1827,7 @@ impl GlobalState {
machine: &MiriMachine<'tcx>,
atomic: AtomicFenceOrd,
) -> InterpResult<'tcx> {
let current_span = machine.current_span();
let current_span = machine.current_user_relevant_span();
self.maybe_perform_sync_operation(&machine.threads, current_span, |index, mut clocks| {
trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
@ -1915,7 +1915,7 @@ impl GlobalState {
callback: impl FnOnce(&VClock) -> R,
) -> R {
let thread = threads.active_thread();
let span = threads.active_thread_ref().current_span();
let span = threads.active_thread_ref().current_user_relevant_span();
let (index, mut clocks) = self.thread_state_mut(thread);
let r = callback(&clocks.clock);
// Increment the clock, so that all following events cannot be confused with anything that

View file

@ -1,4 +1,6 @@
use genmc_sys::LogLevel;
use rustc_abi::Endian;
use rustc_middle::ty::TyCtxt;
use super::GenmcParams;
use crate::{IsolatedOp, MiriConfig, RejectOpWith};
@ -32,8 +34,6 @@ impl GenmcConfig {
genmc_config: &mut Option<GenmcConfig>,
trimmed_arg: &str,
) -> Result<(), String> {
// FIXME(genmc): Ensure host == target somewhere.
if genmc_config.is_none() {
*genmc_config = Some(Default::default());
}
@ -86,11 +86,16 @@ impl GenmcConfig {
///
/// Unsupported configurations return an error.
/// Adjusts Miri settings where required, printing a warning if the change might be unexpected for the user.
pub fn validate_genmc_mode_settings(miri_config: &mut MiriConfig) -> Result<(), &'static str> {
pub fn validate(miri_config: &mut MiriConfig, tcx: TyCtxt<'_>) -> Result<(), &'static str> {
let Some(genmc_config) = miri_config.genmc_config.as_mut() else {
return Ok(());
};
// Check for supported target.
if tcx.data_layout.endian != Endian::Little || tcx.data_layout.pointer_size().bits() != 64 {
return Err("GenMC only supports 64bit little-endian targets");
}
// Check for disallowed configurations.
if !miri_config.data_race_detector {
return Err("Cannot disable data race detection in GenMC mode");

View file

@ -1,11 +1,13 @@
use rustc_abi::{Align, Size};
use rustc_const_eval::interpret::{AllocId, InterpCx, InterpResult};
use rustc_middle::ty::TyCtxt;
pub use self::intercept::EvalContextExt as GenmcEvalContextExt;
pub use self::run::run_genmc_mode;
use crate::intrinsics::AtomicRmwOp;
use crate::{
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriMachine, Scalar,
ThreadId, ThreadManager, VisitProvenance, VisitWith,
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriMachine, OpTy,
Scalar, ThreadId, ThreadManager, VisitProvenance, VisitWith,
};
#[derive(Clone, Copy, Debug)]
@ -36,9 +38,36 @@ mod run {
}
}
mod intercept {
use super::*;
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn genmc_intercept_function(
&mut self,
_instance: rustc_middle::ty::Instance<'tcx>,
_args: &[rustc_const_eval::interpret::FnArg<'tcx, crate::Provenance>],
_dest: &crate::PlaceTy<'tcx>,
) -> InterpResult<'tcx, bool> {
unreachable!()
}
fn handle_genmc_verifier_assume(&mut self, _condition: &OpTy<'tcx>) -> InterpResult<'tcx> {
unreachable!();
}
}
}
impl GenmcCtx {
// We don't provide the `new` function in the dummy module.
pub(crate) fn schedule_thread<'tcx>(
&self,
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
) -> InterpResult<'tcx, Option<ThreadId>> {
unreachable!()
}
/**** Memory access handling ****/
pub(super) fn set_ongoing_action_data_race_free(&self, _enable: bool) {
@ -191,26 +220,6 @@ impl GenmcCtx {
) -> InterpResult<'tcx> {
unreachable!()
}
/**** Scheduling functionality ****/
pub fn schedule_thread<'tcx>(
&self,
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
) -> InterpResult<'tcx, ThreadId> {
unreachable!()
}
/**** Blocking instructions ****/
#[allow(unused)]
pub(crate) fn handle_verifier_assume<'tcx>(
&self,
_machine: &MiriMachine<'tcx>,
_condition: bool,
) -> InterpResult<'tcx, ()> {
unreachable!()
}
}
impl VisitProvenance for GenmcCtx {
@ -231,8 +240,9 @@ impl GenmcConfig {
}
}
pub fn validate_genmc_mode_settings(
pub fn validate(
_miri_config: &mut crate::MiriConfig,
_tcx: TyCtxt<'_>,
) -> Result<(), &'static str> {
Ok(())
}

View file

@ -1,52 +1,19 @@
use std::sync::RwLock;
use genmc_sys::{MemOrdering, RMWBinOp};
use rustc_abi::Size;
use rustc_const_eval::interpret::{InterpResult, interp_ok};
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::mir;
use rustc_middle::mir::interpret;
use rustc_middle::ty::ScalarInt;
use rustc_span::Span;
use tracing::debug;
use super::GenmcScalar;
use crate::diagnostics::EvalContextExt;
use crate::alloc_addresses::EvalContextExt as _;
use crate::intrinsics::AtomicRmwOp;
use crate::{
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, InterpCx, MiriInterpCx,
MiriMachine, NonHaltingDiagnostic, Scalar, throw_unsup_format,
};
use crate::*;
/// Maximum size memory access in bytes that GenMC supports.
pub(super) const MAX_ACCESS_SIZE: u64 = 8;
/// Type for storing spans for already emitted warnings.
pub(super) type WarningCache = RwLock<FxHashSet<Span>>;
#[derive(Default)]
pub(super) struct Warnings {
pub(super) compare_exchange_failure_ordering: WarningCache,
pub(super) compare_exchange_weak: WarningCache,
}
/// Emit a warning if it hasn't already been reported for the current span.
pub(super) fn emit_warning<'tcx>(
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
cache: &WarningCache,
diagnostic: impl FnOnce() -> NonHaltingDiagnostic,
) {
let span = ecx.machine.current_span();
if cache.read().unwrap().contains(&span) {
return;
}
// This span has not yet been reported, so we insert it into the cache and report it.
let mut cache = cache.write().unwrap();
if cache.insert(span) {
// Some other thread may have added this span while we didn't hold the lock, so we only emit it if the insertion succeeded.
ecx.emit_diagnostic(diagnostic());
}
}
/// This function is used to split up a large memory access into aligned, non-overlapping chunks of a limited size.
/// Returns an iterator over the chunks, yielding `(base address, size)` of each chunk, ordered by address.
pub fn split_access(address: Size, size: Size) -> impl Iterator<Item = (u64, u64)> {
@ -80,19 +47,30 @@ pub fn split_access(address: Size, size: Size) -> impl Iterator<Item = (u64, u64
/// We cannot use the `AllocId` instead of the base address, since Miri has no control over the `AllocId`, and it may change across executions.
/// Pointers with `Wildcard` provenance are not supported.
pub fn scalar_to_genmc_scalar<'tcx>(
_ecx: &MiriInterpCx<'tcx>,
ecx: &MiriInterpCx<'tcx>,
genmc_ctx: &GenmcCtx,
scalar: Scalar,
) -> InterpResult<'tcx, GenmcScalar> {
interp_ok(match scalar {
rustc_const_eval::interpret::Scalar::Int(scalar_int) => {
// FIXME(genmc): Add u128 support once GenMC supports it.
let value: u64 = scalar_int.to_uint(scalar_int.size()).try_into().unwrap();
GenmcScalar { value, is_init: true }
GenmcScalar { value, extra: 0, is_init: true }
}
rustc_const_eval::interpret::Scalar::Ptr(pointer, size) => {
// FIXME(genmc,borrow tracking): Borrow tracking information is lost.
let addr = crate::Pointer::from(pointer).addr();
if let crate::Provenance::Wildcard = pointer.provenance {
throw_unsup_format!("Pointers with wildcard provenance not allowed in GenMC mode");
}
let (alloc_id, _size, _prov_extra) =
rustc_const_eval::interpret::Machine::ptr_get_alloc(ecx, pointer, size.into())
.unwrap();
let base_addr = ecx.addr_from_alloc_id(alloc_id, None)?;
// Add the base_addr alloc_id pair to the map.
genmc_ctx.exec_state.genmc_shared_allocs_map.borrow_mut().insert(base_addr, alloc_id);
GenmcScalar { value: addr.bytes(), extra: base_addr, is_init: true }
}
rustc_const_eval::interpret::Scalar::Ptr(_pointer, _size) =>
throw_unsup_format!(
"FIXME(genmc): Implement sending pointers (with provenance) to GenMC."
),
})
}
@ -101,16 +79,25 @@ pub fn scalar_to_genmc_scalar<'tcx>(
/// Convert a `GenmcScalar` back into a Miri `Scalar`.
/// For pointers, attempt to convert the stored base address of their allocation back into an `AllocId`.
pub fn genmc_scalar_to_scalar<'tcx>(
_ecx: &MiriInterpCx<'tcx>,
ecx: &MiriInterpCx<'tcx>,
genmc_ctx: &GenmcCtx,
scalar: GenmcScalar,
size: Size,
) -> InterpResult<'tcx, Scalar> {
// FIXME(genmc): Add GenmcScalar to Miri Pointer conversion.
// NOTE: GenMC always returns 64 bit values, and the upper bits are not yet truncated.
// FIXME(genmc): GenMC should be doing the truncation, not Miri.
let (value_scalar_int, _got_truncated) = ScalarInt::truncate_from_uint(scalar.value, size);
interp_ok(Scalar::Int(value_scalar_int))
// If `extra` is zero, we have a regular integer.
if scalar.extra == 0 {
// NOTE: GenMC always returns 64 bit values, and the upper bits are not yet truncated.
// FIXME(genmc): GenMC should be doing the truncation, not Miri.
let (value_scalar_int, _got_truncated) = ScalarInt::truncate_from_uint(scalar.value, size);
return interp_ok(Scalar::from(value_scalar_int));
}
// `extra` is non-zero, we have a pointer.
// When we get a pointer from GenMC, then we must have sent it to GenMC before in the same execution (since the reads-from relation is always respected).
let alloc_id = genmc_ctx.exec_state.genmc_shared_allocs_map.borrow()[&scalar.extra];
// FIXME(genmc,borrow tracking): Borrow tracking not yet supported.
let provenance = machine::Provenance::Concrete { alloc_id, tag: BorTag::default() };
let ptr = interpret::Pointer::new(provenance, Size::from_bytes(scalar.value));
interp_ok(Scalar::from_pointer(ptr, &ecx.tcx))
}
impl AtomicReadOrd {

View file

@ -7,36 +7,36 @@ use genmc_sys::{
};
use rustc_abi::{Align, Size};
use rustc_const_eval::interpret::{AllocId, InterpCx, InterpResult, interp_ok};
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::{throw_machine_stop, throw_ub_format, throw_unsup_format};
// FIXME(genmc,tracing): Implement some work-around for enabling debug/trace level logging (currently disabled statically in rustc).
use tracing::{debug, info};
use self::global_allocations::{EvalContextExt as _, GlobalAllocationHandler};
use self::helper::{
MAX_ACCESS_SIZE, Warnings, emit_warning, genmc_scalar_to_scalar,
maybe_upgrade_compare_exchange_success_orderings, scalar_to_genmc_scalar, to_genmc_rmw_op,
MAX_ACCESS_SIZE, genmc_scalar_to_scalar, maybe_upgrade_compare_exchange_success_orderings,
scalar_to_genmc_scalar, to_genmc_rmw_op,
};
use self::run::GenmcMode;
use self::thread_id_map::ThreadIdMap;
use crate::concurrency::genmc::helper::split_access;
use crate::diagnostics::SpanDedupDiagnostic;
use crate::intrinsics::AtomicRmwOp;
use crate::{
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriConfig,
MiriMachine, MiriMemoryKind, NonHaltingDiagnostic, Scalar, TerminationInfo, ThreadId,
ThreadManager, VisitProvenance, VisitWith,
};
use crate::*;
mod config;
mod global_allocations;
mod helper;
mod run;
pub(crate) mod scheduling;
mod shims;
mod thread_id_map;
pub use genmc_sys::GenmcParams;
pub use self::config::GenmcConfig;
pub use self::run::run_genmc_mode;
pub use self::shims::EvalContextExt as GenmcEvalContextExt;
#[derive(Debug)]
pub enum ExecutionEndResult {
@ -83,6 +83,9 @@ struct PerExecutionState {
/// we cover all possible executions.
/// `None` if no thread has called `exit` and the main thread isn't finished yet.
exit_status: Cell<Option<ExitStatus>>,
/// Allocations in this map have been sent to GenMC, and should thus be kept around, since future loads from GenMC may return these allocations again.
genmc_shared_allocs_map: RefCell<FxHashMap<u64, AllocId>>,
}
impl PerExecutionState {
@ -90,6 +93,7 @@ impl PerExecutionState {
self.allow_data_races.replace(false);
self.thread_id_manager.borrow_mut().reset();
self.exit_status.set(None);
self.genmc_shared_allocs_map.borrow_mut().clear();
}
}
@ -97,18 +101,11 @@ struct GlobalState {
/// Keep track of global allocations, to ensure they keep the same address across different executions, even if the order of allocations changes.
/// The `AllocId` for globals is stable across executions, so we can use it as an identifier.
global_allocations: GlobalAllocationHandler,
/// Cache for which warnings have already been shown to the user.
/// `None` if warnings are disabled.
warning_cache: Option<Warnings>,
}
impl GlobalState {
fn new(target_usize_max: u64, print_warnings: bool) -> Self {
Self {
global_allocations: GlobalAllocationHandler::new(target_usize_max),
warning_cache: print_warnings.then(Default::default),
}
fn new(target_usize_max: u64) -> Self {
Self { global_allocations: GlobalAllocationHandler::new(target_usize_max) }
}
}
@ -203,6 +200,14 @@ impl GenmcCtx {
fn get_alloc_data_races(&self) -> bool {
self.exec_state.allow_data_races.get()
}
/// Get the GenMC id of the currently active thread.
#[must_use]
fn active_thread_genmc_tid<'tcx>(&self, machine: &MiriMachine<'tcx>) -> i32 {
let thread_infos = self.exec_state.thread_id_manager.borrow();
let curr_thread = machine.threads.active_thread();
thread_infos.get_genmc_tid(curr_thread)
}
}
/// GenMC event handling. These methods are used to inform GenMC about events happening in the program, and to handle scheduling decisions.
@ -266,13 +271,13 @@ impl GenmcCtx {
) -> InterpResult<'tcx, Scalar> {
assert!(!self.get_alloc_data_races(), "atomic load with data race checking disabled.");
let genmc_old_value = if let Some(scalar) = old_val {
scalar_to_genmc_scalar(ecx, scalar)?
scalar_to_genmc_scalar(ecx, self, scalar)?
} else {
GenmcScalar::UNINIT
};
let read_value =
self.handle_load(&ecx.machine, address, size, ordering.to_genmc(), genmc_old_value)?;
genmc_scalar_to_scalar(ecx, read_value, size)
genmc_scalar_to_scalar(ecx, self, read_value, size)
}
/// Inform GenMC about an atomic store.
@ -289,9 +294,9 @@ impl GenmcCtx {
ordering: AtomicWriteOrd,
) -> InterpResult<'tcx, bool> {
assert!(!self.get_alloc_data_races(), "atomic store with data race checking disabled.");
let genmc_value = scalar_to_genmc_scalar(ecx, value)?;
let genmc_value = scalar_to_genmc_scalar(ecx, self, value)?;
let genmc_old_value = if let Some(scalar) = old_value {
scalar_to_genmc_scalar(ecx, scalar)?
scalar_to_genmc_scalar(ecx, self, scalar)?
} else {
GenmcScalar::UNINIT
};
@ -312,12 +317,10 @@ impl GenmcCtx {
ordering: AtomicFenceOrd,
) -> InterpResult<'tcx> {
assert!(!self.get_alloc_data_races(), "atomic fence with data race checking disabled.");
let thread_infos = self.exec_state.thread_id_manager.borrow();
let curr_thread = machine.threads.active_thread();
let genmc_tid = thread_infos.get_genmc_tid(curr_thread);
self.handle.borrow_mut().pin_mut().handle_fence(genmc_tid, ordering.to_genmc());
self.handle
.borrow_mut()
.pin_mut()
.handle_fence(self.active_thread_genmc_tid(machine), ordering.to_genmc());
interp_ok(())
}
@ -343,8 +346,8 @@ impl GenmcCtx {
size,
ordering,
to_genmc_rmw_op(atomic_op, is_signed),
scalar_to_genmc_scalar(ecx, rhs_scalar)?,
scalar_to_genmc_scalar(ecx, old_value)?,
scalar_to_genmc_scalar(ecx, self, rhs_scalar)?,
scalar_to_genmc_scalar(ecx, self, old_value)?,
)
}
@ -366,8 +369,8 @@ impl GenmcCtx {
size,
ordering,
/* genmc_rmw_op */ RMWBinOp::Xchg,
scalar_to_genmc_scalar(ecx, rhs_scalar)?,
scalar_to_genmc_scalar(ecx, old_value)?,
scalar_to_genmc_scalar(ecx, self, rhs_scalar)?,
scalar_to_genmc_scalar(ecx, self, old_value)?,
)
}
@ -405,43 +408,36 @@ impl GenmcCtx {
let upgraded_success_ordering =
maybe_upgrade_compare_exchange_success_orderings(success, fail);
if let Some(warning_cache) = &self.global_state.warning_cache {
// FIXME(genmc): remove once GenMC supports failure memory ordering in `compare_exchange`.
let (effective_failure_ordering, _) =
upgraded_success_ordering.split_memory_orderings();
// Return a warning if the actual orderings don't match the upgraded ones.
if success != upgraded_success_ordering || effective_failure_ordering != fail {
emit_warning(ecx, &warning_cache.compare_exchange_failure_ordering, || {
NonHaltingDiagnostic::GenmcCompareExchangeOrderingMismatch {
success_ordering: success,
upgraded_success_ordering,
failure_ordering: fail,
effective_failure_ordering,
}
});
}
// FIXME(genmc): remove once GenMC implements spurious failures for `compare_exchange_weak`.
if can_fail_spuriously {
emit_warning(ecx, &warning_cache.compare_exchange_weak, || {
NonHaltingDiagnostic::GenmcCompareExchangeWeak
});
}
// FIXME(genmc): remove once GenMC supports failure memory ordering in `compare_exchange`.
let (effective_failure_ordering, _) = upgraded_success_ordering.split_memory_orderings();
// Return a warning if the actual orderings don't match the upgraded ones.
if success != upgraded_success_ordering || effective_failure_ordering != fail {
static DEDUP: SpanDedupDiagnostic = SpanDedupDiagnostic::new();
ecx.dedup_diagnostic(&DEDUP, |_first| {
NonHaltingDiagnostic::GenmcCompareExchangeOrderingMismatch {
success_ordering: success,
upgraded_success_ordering,
failure_ordering: fail,
effective_failure_ordering,
}
});
}
// FIXME(genmc): remove once GenMC implements spurious failures for `compare_exchange_weak`.
if can_fail_spuriously {
static DEDUP: SpanDedupDiagnostic = SpanDedupDiagnostic::new();
ecx.dedup_diagnostic(&DEDUP, |_first| NonHaltingDiagnostic::GenmcCompareExchangeWeak);
}
debug!(
"GenMC: atomic_compare_exchange, address: {address:?}, size: {size:?} (expect: {expected_old_value:?}, new: {new_value:?}, old_value: {old_value:?}, {success:?}, orderings: {fail:?}), can fail spuriously: {can_fail_spuriously}"
);
let thread_infos = self.exec_state.thread_id_manager.borrow();
let genmc_tid = thread_infos.get_genmc_tid(ecx.machine.threads.active_thread());
let cas_result = self.handle.borrow_mut().pin_mut().handle_compare_exchange(
genmc_tid,
self.active_thread_genmc_tid(&ecx.machine),
address.bytes(),
size.bytes(),
scalar_to_genmc_scalar(ecx, expected_old_value)?,
scalar_to_genmc_scalar(ecx, new_value)?,
scalar_to_genmc_scalar(ecx, old_value)?,
scalar_to_genmc_scalar(ecx, self, expected_old_value)?,
scalar_to_genmc_scalar(ecx, self, new_value)?,
scalar_to_genmc_scalar(ecx, self, old_value)?,
upgraded_success_ordering.to_genmc(),
fail.to_genmc(),
can_fail_spuriously,
@ -452,7 +448,7 @@ impl GenmcCtx {
throw_ub_format!("{}", error.to_string_lossy());
}
let return_scalar = genmc_scalar_to_scalar(ecx, cas_result.old_value, size)?;
let return_scalar = genmc_scalar_to_scalar(ecx, self, cas_result.old_value, size)?;
debug!(
"GenMC: atomic_compare_exchange: result: {cas_result:?}, returning scalar: {return_scalar:?}"
);
@ -597,14 +593,10 @@ impl GenmcCtx {
return ecx
.get_global_allocation_address(&self.global_state.global_allocations, alloc_id);
}
let thread_infos = self.exec_state.thread_id_manager.borrow();
let curr_thread = machine.threads.active_thread();
let genmc_tid = thread_infos.get_genmc_tid(curr_thread);
// GenMC doesn't support ZSTs, so we set the minimum size to 1 byte
let genmc_size = size.bytes().max(1);
let chosen_address = self.handle.borrow_mut().pin_mut().handle_malloc(
genmc_tid,
self.active_thread_genmc_tid(machine),
genmc_size,
alignment.bytes(),
);
@ -638,11 +630,16 @@ impl GenmcCtx {
!self.get_alloc_data_races(),
"memory deallocation with data race checking disabled."
);
let thread_infos = self.exec_state.thread_id_manager.borrow();
let curr_thread = machine.threads.active_thread();
let genmc_tid = thread_infos.get_genmc_tid(curr_thread);
self.handle.borrow_mut().pin_mut().handle_free(genmc_tid, address.bytes());
if self
.handle
.borrow_mut()
.pin_mut()
.handle_free(self.active_thread_genmc_tid(machine), address.bytes())
{
// FIXME(genmc): improve error handling.
// An error was detected, so we get the error string from GenMC.
throw_ub_format!("{}", self.try_get_error().unwrap());
}
interp_ok(())
}
@ -692,7 +689,7 @@ impl GenmcCtx {
let genmc_tid = thread_infos.get_genmc_tid(curr_thread_id);
debug!("GenMC: thread {curr_thread_id:?} ({genmc_tid:?}) finished.");
// NOTE: Miri doesn't support return values for threads, but GenMC expects one, so we return 0
// NOTE: Miri doesn't support return values for threads, but GenMC expects one, so we return 0.
self.handle.borrow_mut().pin_mut().handle_thread_finish(genmc_tid, /* ret_val */ 0);
}
@ -732,17 +729,6 @@ impl GenmcCtx {
self.exec_state.exit_status.set(Some(exit_status));
interp_ok(())
}
/**** Blocking instructions ****/
#[allow(unused)]
pub(crate) fn handle_verifier_assume<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
condition: bool,
) -> InterpResult<'tcx, ()> {
if condition { interp_ok(()) } else { self.handle_user_block(machine) }
}
}
impl GenmcCtx {
@ -765,17 +751,12 @@ impl GenmcCtx {
"GenMC mode currently does not support atomics larger than {MAX_ACCESS_SIZE} bytes.",
);
}
let thread_infos = self.exec_state.thread_id_manager.borrow();
let curr_thread_id = machine.threads.active_thread();
let genmc_tid = thread_infos.get_genmc_tid(curr_thread_id);
debug!(
"GenMC: load, thread: {curr_thread_id:?} ({genmc_tid:?}), address: {addr} == {addr:#x}, size: {size:?}, ordering: {memory_ordering:?}, old_value: {genmc_old_value:x?}",
"GenMC: load, address: {addr} == {addr:#x}, size: {size:?}, ordering: {memory_ordering:?}, old_value: {genmc_old_value:x?}",
addr = address.bytes()
);
let load_result = self.handle.borrow_mut().pin_mut().handle_load(
genmc_tid,
self.active_thread_genmc_tid(machine),
address.bytes(),
size.bytes(),
memory_ordering,
@ -816,17 +797,12 @@ impl GenmcCtx {
"GenMC mode currently does not support atomics larger than {MAX_ACCESS_SIZE} bytes."
);
}
let thread_infos = self.exec_state.thread_id_manager.borrow();
let curr_thread_id = machine.threads.active_thread();
let genmc_tid = thread_infos.get_genmc_tid(curr_thread_id);
debug!(
"GenMC: store, thread: {curr_thread_id:?} ({genmc_tid:?}), address: {addr} = {addr:#x}, size: {size:?}, ordering {memory_ordering:?}, value: {genmc_value:?}",
"GenMC: store, address: {addr} = {addr:#x}, size: {size:?}, ordering {memory_ordering:?}, value: {genmc_value:?}",
addr = address.bytes()
);
let store_result = self.handle.borrow_mut().pin_mut().handle_store(
genmc_tid,
self.active_thread_genmc_tid(machine),
address.bytes(),
size.bytes(),
genmc_value,
@ -867,14 +843,11 @@ impl GenmcCtx {
MAX_ACCESS_SIZE,
size.bytes()
);
let curr_thread_id = ecx.machine.threads.active_thread();
let genmc_tid = self.exec_state.thread_id_manager.borrow().get_genmc_tid(curr_thread_id);
debug!(
"GenMC: atomic_rmw_op, thread: {curr_thread_id:?} ({genmc_tid:?}) (op: {genmc_rmw_op:?}, rhs value: {genmc_rhs_scalar:?}), address: {address:?}, size: {size:?}, ordering: {ordering:?}",
"GenMC: atomic_rmw_op (op: {genmc_rmw_op:?}, rhs value: {genmc_rhs_scalar:?}), address: {address:?}, size: {size:?}, ordering: {ordering:?}",
);
let rmw_result = self.handle.borrow_mut().pin_mut().handle_read_modify_write(
genmc_tid,
self.active_thread_genmc_tid(&ecx.machine),
address.bytes(),
size.bytes(),
genmc_rmw_op,
@ -888,28 +861,22 @@ impl GenmcCtx {
throw_ub_format!("{}", error.to_string_lossy());
}
let old_value_scalar = genmc_scalar_to_scalar(ecx, rmw_result.old_value, size)?;
let old_value_scalar = genmc_scalar_to_scalar(ecx, self, rmw_result.old_value, size)?;
let new_value_scalar = if rmw_result.is_coherence_order_maximal_write {
Some(genmc_scalar_to_scalar(ecx, rmw_result.new_value, size)?)
Some(genmc_scalar_to_scalar(ecx, self, rmw_result.new_value, size)?)
} else {
None
};
interp_ok((old_value_scalar, new_value_scalar))
}
/**** Blocking functionality ****/
/// Handle a user thread getting blocked.
/// This may happen due to an manual `assume` statement added by a user
/// or added by some automated program transformation, e.g., for spinloops.
fn handle_user_block<'tcx>(&self, _machine: &MiriMachine<'tcx>) -> InterpResult<'tcx> {
todo!()
}
}
impl VisitProvenance for GenmcCtx {
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
// We don't have any tags.
fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
let genmc_shared_allocs_map = self.exec_state.genmc_shared_allocs_map.borrow();
for alloc_id in genmc_shared_allocs_map.values().copied() {
visit(Some(alloc_id), None);
}
}
}

View file

@ -16,13 +16,6 @@ pub(super) enum GenmcMode {
Verification,
}
impl GenmcMode {
/// Return whether warnings on unsupported features should be printed in this mode.
fn print_unsupported_warnings(self) -> bool {
self == GenmcMode::Verification
}
}
/// Do a complete run of the program in GenMC mode.
/// This will call `eval_entry` multiple times, until either:
/// - An error is detected (indicated by a `None` return value)
@ -57,8 +50,7 @@ fn run_genmc_mode_impl<'tcx>(
// There exists only one `global_state` per full run in GenMC mode.
// It is shared by all `GenmcCtx` in this run.
// FIXME(genmc): implement multithreading once GenMC supports it.
let global_state =
Arc::new(GlobalState::new(tcx.target_usize_max(), mode.print_unsupported_warnings()));
let global_state = Arc::new(GlobalState::new(tcx.target_usize_max()));
let genmc_ctx = Rc::new(GenmcCtx::new(config, global_state, mode));
// `rep` is used to report the progress, Miri will panic on wrap-around.

View file

@ -1,58 +1,122 @@
use genmc_sys::{ActionKind, ExecutionState};
use rustc_middle::mir::TerminatorKind;
use rustc_middle::ty::{self, Ty};
use super::GenmcCtx;
use crate::{
InterpCx, InterpResult, MiriMachine, TerminationInfo, ThreadId, interp_ok, throw_machine_stop,
};
/// Classification of the next instruction the active thread will execute, used by the
/// GenMC scheduling logic to decide whether the scheduler must be consulted before
/// taking the next step.
enum NextInstrKind {
    /// The next instruction may be an atomic operation (or an intercepted function with
    /// atomic semantics); the `ActionKind` tells GenMC whether it may have load semantics.
    MaybeAtomic(ActionKind),
    /// The next instruction is definitely not atomic, so the current thread can simply
    /// keep running without asking GenMC for a scheduling decision.
    NonAtomic,
}
/// Check if a call or tail-call could have atomic load semantics.
///
/// Classifies the next step of the active thread: either it is definitely non-atomic
/// (so the thread should just keep running), or it might be atomic, in which case the
/// returned `ActionKind` tells GenMC whether the operation may have load semantics.
fn get_next_instruction_kind<'tcx>(
    ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
) -> InterpResult<'tcx, NextInstrKind> {
    use NextInstrKind::*;
    let threads = &ecx.machine.threads;
    // GenMC's "writes-first" scheduling can improve verification performance by
    // scheduling writes before reads, so we try to tell it whether the next instruction
    // might be a load. `Load` is the safe default whenever we cannot guarantee that the
    // next instruction isn't one.
    if !threads.active_thread_ref().is_enabled() {
        // The current thread can get blocked (e.g., due to a thread join, `Mutex::lock`,
        // assume statement, ...), then we need to ask GenMC for another thread to schedule.
        // Most to all blocking operations have load semantics, since they wait on something
        // to change in another thread: a join loads the other thread's return value(s), a
        // `Mutex` waiter loads the mutex state (Locked, Unlocked). So `Load` is the safe
        // classification, since we may not know what the next instruction is.
        return interp_ok(MaybeAtomic(ActionKind::Load));
    }
    // The thread is still enabled. Only a terminator can warrant yielding; in all other
    // cases we keep running this thread, since it never makes sense to yield before a
    // non-atomic operation.
    let top_frame = match threads.active_thread_stack().last() {
        Some(frame) => frame,
        None => return interp_ok(NonAtomic),
    };
    let location = match top_frame.current_loc() {
        either::Either::Left(loc) => loc,
        // We are unwinding, so the next step is definitely not atomic.
        either::Either::Right(_) => return interp_ok(NonAtomic),
    };
    let block = &top_frame.body().basic_blocks[location.block];
    if block.statements.get(location.statement_index).is_some() {
        // Statements can't be atomic.
        return interp_ok(NonAtomic);
    }
    // All atomics are modeled as function calls to intrinsic functions. The one
    // exception is thread joining, but those are also calls.
    if let TerminatorKind::Call { func, .. } | TerminatorKind::TailCall { func, .. } =
        &block.terminator().kind
    {
        get_function_kind(ecx, func.ty(&top_frame.body().local_decls, *ecx.tcx))
    } else {
        // Non-call terminators are not atomic.
        interp_ok(NonAtomic)
    }
}
/// Classify a callee: decide whether calling a function of type `func_ty` could behave
/// like an atomic operation, and if so, whether it may have load semantics.
fn get_function_kind<'tcx>(
    ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
    func_ty: Ty<'tcx>,
) -> InterpResult<'tcx, NextInstrKind> {
    use NextInstrKind::*;
    // If we can't tell which function is called, it might be `pthread_join`, so we
    // conservatively treat it as a possible load.
    let callee_def_id = match func_ty.kind() {
        &ty::FnDef(def_id, _args) => def_id,
        _ => return interp_ok(MaybeAtomic(ActionKind::Load)),
    };
    if let Some(intrinsic_def) = ecx.tcx.intrinsic(callee_def_id) {
        return interp_ok(match intrinsic_def.name.as_str().strip_prefix("atomic_") {
            // A non-atomic intrinsic is guaranteed not to be an atomic load.
            None => NonAtomic,
            // `atomic_store`, `atomic_fence` and `atomic_singlethreadfence` are not
            // considered loads.
            Some("store" | "fence" | "singlethreadfence") => MaybeAtomic(ActionKind::NonLoad),
            // Any other (including future) `atomic_*` intrinsic may have load semantics,
            // so we err on the side of caution and classify it as a possible load.
            Some(_) => MaybeAtomic(ActionKind::Load),
        });
    }
    if ecx.tcx.is_foreign_item(callee_def_id) {
        // Some shims, like pthread_join, must be considered loads. So just consider them
        // all loads; these calls are not *that* common.
        return interp_ok(MaybeAtomic(ActionKind::Load));
    }
    // NOTE: Functions intercepted by Miri in `concurrency/genmc/intercept.rs` must also be
    // added here. Such intercepted functions, like `sys::Mutex::lock`, should be treated as
    // atomics to ensure we call the scheduler when we encounter one of them, and they must
    // be classified by whether they may have load semantics.
    if ecx.tcx.is_diagnostic_item(rustc_span::sym::sys_mutex_lock, callee_def_id)
        || ecx.tcx.is_diagnostic_item(rustc_span::sym::sys_mutex_try_lock, callee_def_id)
    {
        interp_ok(MaybeAtomic(ActionKind::Load))
    } else if ecx.tcx.is_diagnostic_item(rustc_span::sym::sys_mutex_unlock, callee_def_id) {
        interp_ok(MaybeAtomic(ActionKind::NonLoad))
    } else {
        // The next step is a call to a regular Rust function: not atomic.
        interp_ok(NonAtomic)
    }
}
impl GenmcCtx {
/// Returns the thread ID of the next thread to schedule, or `None` to continue with the current thread.
pub(crate) fn schedule_thread<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
) -> InterpResult<'tcx, ThreadId> {
let thread_manager = &ecx.machine.threads;
let active_thread_id = thread_manager.active_thread();
// Determine whether the next instruction in the current thread might be a load.
// This is used for the "writes-first" scheduling in GenMC.
// Scheduling writes before reads can be beneficial for verification performance.
// `Load` is a safe default for the next instruction type if we cannot guarantee that it isn't a load.
let curr_thread_next_instr_kind = if !thread_manager.active_thread_ref().is_enabled() {
// The current thread can get blocked (e.g., due to a thread join, `Mutex::lock`, assume statement, ...), then we need to ask GenMC for another thread to schedule.
// Most to all blocking operations have load semantics, since they wait on something to change in another thread,
// e.g., a thread join waiting on another thread to finish (join loads the return value(s) of the other thread),
// or a thread waiting for another thread to unlock a `Mutex`, which loads the mutex state (Locked, Unlocked).
ActionKind::Load
} else {
// This thread is still enabled. If it executes a terminator next, we consider yielding,
// but in all other cases we just keep running this thread since it never makes sense
// to yield before a non-atomic operation.
let Some(frame) = thread_manager.active_thread_stack().last() else {
return interp_ok(active_thread_id);
};
let either::Either::Left(loc) = frame.current_loc() else {
// We are unwinding, so the next step is definitely not atomic.
return interp_ok(active_thread_id);
};
let basic_block = &frame.body().basic_blocks[loc.block];
if let Some(_statement) = basic_block.statements.get(loc.statement_index) {
// Statements can't be atomic.
return interp_ok(active_thread_id);
}
// FIXME(genmc): determine terminator kind.
ActionKind::Load
) -> InterpResult<'tcx, Option<ThreadId>> {
let atomic_kind = match get_next_instruction_kind(ecx)? {
NextInstrKind::MaybeAtomic(atomic_kind) => atomic_kind,
NextInstrKind::NonAtomic => return interp_ok(None), // No need to reschedule on a non-atomic.
};
let active_thread_id = ecx.machine.threads.active_thread();
let thread_infos = self.exec_state.thread_id_manager.borrow();
let genmc_tid = thread_infos.get_genmc_tid(active_thread_id);
let mut mc = self.handle.borrow_mut();
let pinned_mc = mc.as_mut().unwrap();
let result = pinned_mc.schedule_next(genmc_tid, curr_thread_next_instr_kind);
let result = self.handle.borrow_mut().pin_mut().schedule_next(genmc_tid, atomic_kind);
// Depending on the exec_state, we either schedule the given thread, or we are finished with this execution.
match result.exec_state {
ExecutionState::Ok => interp_ok(thread_infos.get_miri_tid(result.next_thread)),
ExecutionState::Ok => interp_ok(Some(thread_infos.get_miri_tid(result.next_thread))),
ExecutionState::Blocked => throw_machine_stop!(TerminationInfo::GenmcBlockedExecution),
ExecutionState::Finished => {
let exit_status = self.exec_state.exit_status.get().expect(
@ -63,6 +127,14 @@ impl GenmcCtx {
leak_check: exit_status.do_leak_check()
});
}
ExecutionState::Error => {
// GenMC found an error in one of the `handle_*` functions, but didn't return the detected error from the function immediately.
// This is still a bug in the user program, so we print the error string.
panic!(
"GenMC found an error ({:?}), but didn't report it immediately, so we cannot provide an appropriate source code location for where it happened.",
self.try_get_error().unwrap()
);
}
_ => unreachable!(),
}
}

View file

@ -0,0 +1,234 @@
use genmc_sys::AssumeType;
use rustc_middle::ty;
use tracing::debug;
use crate::concurrency::genmc::MAX_ACCESS_SIZE;
use crate::concurrency::thread::EvalContextExt as _;
use crate::*;
impl GenmcCtx {
    /// Inform GenMC that the active thread is blocked by an `assume`.
    /// This may happen due to a manual `assume` statement added by a user
    /// or added by some automated program transformation, e.g., for spinloops.
    fn handle_assume_block<'tcx>(
        &self,
        machine: &MiriMachine<'tcx>,
        assume_type: AssumeType,
    ) -> InterpResult<'tcx> {
        debug!("GenMC: assume statement, blocking active thread.");
        let genmc_tid = self.active_thread_genmc_tid(machine);
        self.handle.borrow_mut().pin_mut().handle_assume_block(genmc_tid, assume_type);
        interp_ok(())
    }
}
// Handling of code intercepted by Miri in GenMC mode, such as assume statement or `std::sync::Mutex`.
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
// Private extension trait: the GenMC-internal parts of the interception logic for
// `assume` statements and `std::sync::Mutex` operations.
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Small helper to get the arguments of an intercepted function call.
    /// Panics if the call does not have exactly `N` arguments.
    fn get_fn_args<const N: usize>(
        &self,
        instance: ty::Instance<'tcx>,
        args: &[FnArg<'tcx>],
    ) -> InterpResult<'tcx, [OpTy<'tcx>; N]> {
        let this = self.eval_context_ref();
        let args = this.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
        if let Ok(ops) = args.try_into() {
            return interp_ok(ops);
        }
        panic!("{} is a diagnostic item expected to have {} arguments", instance, N);
    }

    /**** Blocking functionality ****/

    /// Handle a thread getting blocked by a user assume (not an automatically generated assume).
    /// Unblocking this thread in the current execution will cause a panic.
    /// Miri does not provide GenMC with the annotations to determine when to unblock the thread, so it should never be unblocked.
    fn handle_user_assume_block(&mut self) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        debug!(
            "GenMC: block thread {:?} due to failing assume statement.",
            this.machine.threads.active_thread()
        );
        // Only an enabled (currently running) thread can reach a failing `assume`.
        assert!(this.machine.threads.active_thread_ref().is_enabled());
        // Block the thread on the GenMC side.
        let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
        genmc_ctx.handle_assume_block(&this.machine, AssumeType::User)?;
        // Block the thread on the Miri side.
        this.block_thread(
            BlockReason::Genmc,
            None,
            callback!(
                @capture<'tcx> {}
                |_this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    unreachable!("GenMC should never unblock a thread blocked by an `assume`.");
                }
            ),
        );
        interp_ok(())
    }

    /// Intercept `sys::Mutex::lock`. The mutex state lives entirely on the GenMC side;
    /// depending on GenMC's answer the lock is acquired, retried later (thread blocked
    /// until GenMC reschedules it), or the thread is blocked permanently.
    fn intercept_mutex_lock(&mut self, mutex: MPlaceTy<'tcx>) -> InterpResult<'tcx> {
        debug!("GenMC: handling Mutex::lock()");
        let this = self.eval_context_mut();
        let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
        let size = mutex.layout.size.bytes();
        assert!(
            size <= MAX_ACCESS_SIZE,
            "Mutex is larger than maximal size of a memory access supported by GenMC ({size} > {MAX_ACCESS_SIZE})"
        );
        let result = genmc_ctx.handle.borrow_mut().pin_mut().handle_mutex_lock(
            genmc_ctx.active_thread_genmc_tid(&this.machine),
            mutex.ptr().addr().bytes(),
            size,
        );
        if let Some(error) = result.error.as_ref() {
            // FIXME(genmc): improve error handling.
            throw_ub_format!("{}", error.to_string_lossy());
        }
        if result.is_reset {
            debug!("GenMC: Mutex::lock: Reset");
            // GenMC informed us to reset and try the lock again later.
            // We block the current thread until GenMC schedules it again.
            this.block_thread(
                crate::BlockReason::Genmc,
                None,
                crate::callback!(
                    @capture<'tcx> {
                        mutex: MPlaceTy<'tcx>,
                    }
                    |this, unblock: crate::UnblockKind| {
                        debug!("GenMC: Mutex::lock: unblocking callback called, attempting to lock the Mutex again.");
                        assert_eq!(unblock, crate::UnblockKind::Ready);
                        this.intercept_mutex_lock(mutex)?;
                        interp_ok(())
                    }
                ),
            );
        } else if result.is_lock_acquired {
            debug!("GenMC: Mutex::lock successfully acquired the Mutex.");
        } else {
            debug!("GenMC: Mutex::lock failed to acquire the Mutex, permanently blocking thread.");
            // NOTE: `handle_mutex_lock` already blocked the current thread on the GenMC side.
            this.block_thread(
                crate::BlockReason::Genmc,
                None,
                crate::callback!(
                    @capture<'tcx> {
                        mutex: MPlaceTy<'tcx>,
                    }
                    |_this, _unblock: crate::UnblockKind| {
                        unreachable!("A thread blocked on `Mutex::lock` should not be unblocked again.");
                    }
                ),
            );
        }
        // NOTE: We don't write anything back to Miri's memory where the Mutex is located, that state is handled only by GenMC.
        interp_ok(())
    }

    /// Intercept `sys::Mutex::try_lock`, writing the success of the operation (a `bool`)
    /// to `dest`. Never blocks the thread.
    fn intercept_mutex_try_lock(
        &mut self,
        mutex: MPlaceTy<'tcx>,
        dest: &crate::PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        debug!("GenMC: handling Mutex::try_lock()");
        let this = self.eval_context_mut();
        let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
        let size = mutex.layout.size.bytes();
        assert!(
            size <= MAX_ACCESS_SIZE,
            "Mutex is larger than maximal size of a memory access supported by GenMC ({size} > {MAX_ACCESS_SIZE})"
        );
        let result = genmc_ctx.handle.borrow_mut().pin_mut().handle_mutex_try_lock(
            genmc_ctx.active_thread_genmc_tid(&this.machine),
            mutex.ptr().addr().bytes(),
            size,
        );
        if let Some(error) = result.error.as_ref() {
            // FIXME(genmc): improve error handling.
            throw_ub_format!("{}", error.to_string_lossy());
        }
        debug!(
            "GenMC: Mutex::try_lock(): is_reset: {}, is_lock_acquired: {}",
            result.is_reset, result.is_lock_acquired
        );
        assert!(!result.is_reset, "GenMC returned 'reset' for a mutex try_lock.");
        // Write the return value of try_lock, i.e., whether we acquired the mutex.
        this.write_scalar(Scalar::from_bool(result.is_lock_acquired), dest)?;
        // NOTE: We don't write anything back to Miri's memory where the Mutex is located, that state is handled only by GenMC.
        interp_ok(())
    }

    /// Intercept `sys::Mutex::unlock`. Never blocks the thread.
    fn intercept_mutex_unlock(&self, mutex: MPlaceTy<'tcx>) -> InterpResult<'tcx> {
        debug!("GenMC: handling Mutex::unlock()");
        let this = self.eval_context_ref();
        let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
        let result = genmc_ctx.handle.borrow_mut().pin_mut().handle_mutex_unlock(
            genmc_ctx.active_thread_genmc_tid(&this.machine),
            mutex.ptr().addr().bytes(),
            mutex.layout.size.bytes(),
        );
        if let Some(error) = result.error.as_ref() {
            // FIXME(genmc): improve error handling.
            throw_ub_format!("{}", error.to_string_lossy());
        }
        // NOTE: We don't write anything back to Miri's memory where the Mutex is located, that state is handled only by GenMC.
        interp_ok(())
    }
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
// Public extension trait: the interception entry points called from the rest of Miri in
// GenMC mode.
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Given a `ty::Instance<'tcx>`, do any required special handling.
    /// Returns true if this `instance` should be skipped (i.e., no MIR should be executed for it).
    fn genmc_intercept_function(
        &mut self,
        instance: rustc_middle::ty::Instance<'tcx>,
        args: &[rustc_const_eval::interpret::FnArg<'tcx, crate::Provenance>],
        dest: &crate::PlaceTy<'tcx>,
    ) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        assert!(
            this.machine.data_race.as_genmc_ref().is_some(),
            "This function should only be called in GenMC mode."
        );
        // NOTE: When adding new intercepted functions here, they must also be added to `fn get_function_kind` in `concurrency/genmc/scheduling.rs`.
        use rustc_span::sym;
        if this.tcx.is_diagnostic_item(sym::sys_mutex_lock, instance.def_id()) {
            let [mutex] = this.get_fn_args(instance, args)?;
            let mutex = this.deref_pointer(&mutex)?;
            this.intercept_mutex_lock(mutex)?;
        } else if this.tcx.is_diagnostic_item(sym::sys_mutex_try_lock, instance.def_id()) {
            let [mutex] = this.get_fn_args(instance, args)?;
            let mutex = this.deref_pointer(&mutex)?;
            this.intercept_mutex_try_lock(mutex, dest)?;
        } else if this.tcx.is_diagnostic_item(sym::sys_mutex_unlock, instance.def_id()) {
            let [mutex] = this.get_fn_args(instance, args)?;
            let mutex = this.deref_pointer(&mutex)?;
            this.intercept_mutex_unlock(mutex)?;
        } else {
            // Nothing to intercept.
            return interp_ok(false);
        }
        interp_ok(true)
    }

    /// Handle an `assume` statement. This will tell GenMC to block the current thread if the
    /// `condition` is false; the thread is then also blocked on the Miri side (see
    /// `handle_user_assume_block`).
    fn handle_genmc_verifier_assume(&mut self, condition: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let condition_bool = this.read_scalar(condition)?.to_bool()?;
        debug!("GenMC: handle_genmc_verifier_assume, condition: {condition:?} = {condition_bool}");
        if condition_bool {
            return interp_ok(());
        }
        this.handle_user_assume_block()
    }
}

View file

@ -22,5 +22,5 @@ pub mod weak_memory;
mod genmc;
pub use self::data_race_handler::{AllocDataRaceHandler, GlobalDataRaceHandler};
pub use self::genmc::{ExitType, GenmcConfig, GenmcCtx, run_genmc_mode};
pub use self::genmc::{ExitType, GenmcConfig, GenmcCtx, GenmcEvalContextExt, run_genmc_mode};
pub use self::vector_clock::VClock;

View file

@ -110,6 +110,9 @@ pub enum BlockReason {
Eventfd,
/// Blocked on unnamed_socket.
UnnamedSocket,
/// Blocked for any reason related to GenMC, such as `assume` statements (GenMC mode only).
/// Will be implicitly unblocked when GenMC schedules this thread again.
Genmc,
}
/// The state of a thread.
@ -260,7 +263,7 @@ impl<'tcx> Thread<'tcx> {
self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1))
}
pub fn current_span(&self) -> Span {
pub fn current_user_relevant_span(&self) -> Span {
self.top_user_relevant_frame()
.map(|frame_idx| self.stack[frame_idx].current_span())
.unwrap_or(rustc_span::DUMMY_SP)
@ -572,6 +575,7 @@ impl<'tcx> ThreadManager<'tcx> {
/// See <https://docs.microsoft.com/en-us/windows/win32/procthread/thread-handles-and-identifiers>:
/// > The handle is valid until closed, even after the thread it represents has been terminated.
fn detach_thread(&mut self, id: ThreadId, allow_terminated_joined: bool) -> InterpResult<'tcx> {
// NOTE: In GenMC mode, we treat detached threads like regular threads that are never joined, so there is no special handling required here.
trace!("detaching {:?}", id);
let is_ub = if allow_terminated_joined && self.threads[id].state.is_terminated() {
@ -704,14 +708,31 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
// In GenMC mode, we let GenMC do the scheduling.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
let next_thread_id = genmc_ctx.schedule_thread(this)?;
let thread_manager = &mut this.machine.threads;
thread_manager.active_thread = next_thread_id;
assert!(thread_manager.threads[thread_manager.active_thread].state.is_enabled());
return interp_ok(SchedulingAction::ExecuteStep);
if this.machine.data_race.as_genmc_ref().is_some() {
loop {
let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
let Some(next_thread_id) = genmc_ctx.schedule_thread(this)? else {
return interp_ok(SchedulingAction::ExecuteStep);
};
// If a thread is blocked on GenMC, we have to implicitly unblock it when it gets scheduled again.
if this.machine.threads.threads[next_thread_id]
.state
.is_blocked_on(BlockReason::Genmc)
{
info!(
"GenMC: scheduling blocked thread {next_thread_id:?}, so we unblock it now."
);
this.unblock_thread(next_thread_id, BlockReason::Genmc)?;
}
// The thread we just unblocked may have been blocked again during the unblocking callback.
// In that case, we need to ask for a different thread to run next.
let thread_manager = &mut this.machine.threads;
if thread_manager.threads[next_thread_id].state.is_enabled() {
// Set the new active thread.
thread_manager.active_thread = next_thread_id;
return interp_ok(SchedulingAction::ExecuteStep);
}
}
}
// We are not in GenMC mode, so we control the scheduling.
@ -856,7 +877,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let mut state = tls::TlsDtorsState::default();
Box::new(move |m| state.on_stack_empty(m))
});
let current_span = this.machine.current_span();
let current_span = this.machine.current_user_relevant_span();
match &mut this.machine.data_race {
GlobalDataRaceHandler::None => {}
GlobalDataRaceHandler::Vclocks(data_race) =>

View file

@ -1,9 +1,11 @@
use std::fmt::{self, Write};
use std::num::NonZero;
use std::sync::Mutex;
use rustc_abi::{Align, Size};
use rustc_errors::{Diag, DiagMessage, Level};
use rustc_span::{DUMMY_SP, SpanData, Symbol};
use rustc_hash::FxHashSet;
use rustc_span::{DUMMY_SP, Span, SpanData, Symbol};
use crate::borrow_tracker::stacked_borrows::diagnostics::TagHistory;
use crate::borrow_tracker::tree_borrows::diagnostics as tree_diagnostics;
@ -835,4 +837,45 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
&this.machine,
);
}
/// Call `f` only if this is the first time we are seeing this span.
/// The `first` parameter indicates whether this is the first time *ever* that this diagnostic
/// is emitted.
fn dedup_diagnostic(
    &self,
    dedup: &SpanDedupDiagnostic,
    f: impl FnOnce(/*first*/ bool) -> NonHaltingDiagnostic,
) {
    let this = self.eval_context_ref();
    // We want to deduplicate both based on where the error seems to be located "from the user
    // perspective", and the location of the actual operation (to avoid warning about the same
    // operation called from different places in the local code).
    let span1 = this.machine.current_user_relevant_span();
    // For the "location of the operation", we still skip `track_caller` frames, to match the
    // span that the diagnostic will point at. If the stack contains only `track_caller`
    // frames (or is empty), we fall back to `span1`.
    let span2 = this
        .active_thread_stack()
        .iter()
        .rev()
        .find(|frame| !frame.instance().def.requires_caller_location(*this.tcx))
        .map(|frame| frame.current_span())
        .unwrap_or(span1);
    let mut lock = dedup.0.lock().unwrap();
    let first = lock.is_empty();
    // Avoid mutating the hashset unless both spans are new: `contains` is checked before any
    // `insert`, and the short-circuiting `&&`/`||` ensure `span2` is only inserted when
    // `span1` was newly inserted too (and not inserted twice when the two spans coincide).
    if !lock.contains(&span2) && lock.insert(span1) && (span1 == span2 || lock.insert(span2)) {
        // Both of the two spans were newly inserted.
        this.emit_diagnostic(f(first));
    }
}
}
/// Helps deduplicate a diagnostic to ensure it is only shown once per span.
/// The `const fn` constructor allows instances to live in `static` items at each use site.
pub struct SpanDedupDiagnostic(Mutex<FxHashSet<Span>>);

impl SpanDedupDiagnostic {
    /// Create an empty deduplication set.
    pub const fn new() -> Self {
        // `with_hasher` (rather than `Default`) keeps this constructor usable in `const`
        // context, which the `static DEDUP` use sites rely on.
        Self(Mutex::new(FxHashSet::with_hasher(rustc_hash::FxBuildHasher)))
    }
}

View file

@ -1,10 +1,12 @@
use std::num::NonZero;
use std::sync::Mutex;
use std::time::Duration;
use std::{cmp, iter};
use rand::RngCore;
use rustc_abi::{Align, ExternAbi, FieldIdx, FieldsShape, Size, Variants};
use rustc_apfloat::Float;
use rustc_hash::FxHashSet;
use rustc_hir::Safety;
use rustc_hir::def::{DefKind, Namespace};
use rustc_hir::def_id::{CRATE_DEF_INDEX, CrateNum, DefId, LOCAL_CRATE};
@ -649,7 +651,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
match reject_with {
RejectOpWith::Abort => isolation_abort_error(op_name),
RejectOpWith::WarningWithoutBacktrace => {
let mut emitted_warnings = this.machine.reject_in_isolation_warned.borrow_mut();
// Deduplicate these warnings *by shim* (not by span)
static DEDUP: Mutex<FxHashSet<String>> =
Mutex::new(FxHashSet::with_hasher(rustc_hash::FxBuildHasher));
let mut emitted_warnings = DEDUP.lock().unwrap();
if !emitted_warnings.contains(op_name) {
// First time we are seeing this.
emitted_warnings.insert(op_name.to_owned());
@ -1058,8 +1063,8 @@ impl<'tcx> MiriMachine<'tcx> {
/// `#[track_caller]`.
/// This function is backed by a cache, and can be assumed to be very fast.
/// It will work even when the stack is empty.
pub fn current_span(&self) -> Span {
self.threads.active_thread_ref().current_span()
pub fn current_user_relevant_span(&self) -> Span {
self.threads.active_thread_ref().current_user_relevant_span()
}
/// Returns the span of the *caller* of the current operation, again

View file

@ -1,7 +1,7 @@
use rand::Rng;
use rustc_apfloat::Float;
use rustc_middle::ty::FloatTy;
use rustc_middle::ty;
use rustc_middle::ty::FloatTy;
use super::check_intrinsic_arg_count;
use crate::helpers::{ToHost, ToSoft};
@ -79,7 +79,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
FloatTy::F128 => unimplemented!("f16_f128"),
};
this.write_scalar(val, &dest)?;
}
}

View file

@ -60,6 +60,7 @@ extern crate rustc_ast;
extern crate rustc_const_eval;
extern crate rustc_data_structures;
extern crate rustc_errors;
extern crate rustc_hash;
extern crate rustc_hir;
extern crate rustc_index;
extern crate rustc_middle;
@ -109,6 +110,7 @@ pub type StrictPointer = interpret::Pointer<machine::Provenance>;
pub type Scalar = interpret::Scalar<machine::Provenance>;
pub type ImmTy<'tcx> = interpret::ImmTy<'tcx, machine::Provenance>;
pub type OpTy<'tcx> = interpret::OpTy<'tcx, machine::Provenance>;
pub type FnArg<'tcx> = interpret::FnArg<'tcx, machine::Provenance>;
pub type PlaceTy<'tcx> = interpret::PlaceTy<'tcx, machine::Provenance>;
pub type MPlaceTy<'tcx> = interpret::MPlaceTy<'tcx, machine::Provenance>;

View file

@ -31,7 +31,9 @@ use rustc_target::callconv::FnAbi;
use crate::alloc_addresses::EvalContextExt;
use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
use crate::concurrency::{AllocDataRaceHandler, GenmcCtx, GlobalDataRaceHandler, weak_memory};
use crate::concurrency::{
AllocDataRaceHandler, GenmcCtx, GenmcEvalContextExt as _, GlobalDataRaceHandler, weak_memory,
};
use crate::*;
/// First real-time signal.
@ -649,16 +651,6 @@ pub struct MiriMachine<'tcx> {
pub(crate) pthread_rwlock_sanity: Cell<bool>,
pub(crate) pthread_condvar_sanity: Cell<bool>,
/// Remembers whether we already warned about an extern type with Stacked Borrows.
pub(crate) sb_extern_type_warned: Cell<bool>,
/// Remember whether we already warned about sharing memory with a native call.
#[allow(unused)]
pub(crate) native_call_mem_warned: Cell<bool>,
/// Remembers which shims have already shown the warning about erroring in isolation.
pub(crate) reject_in_isolation_warned: RefCell<FxHashSet<String>>,
/// Remembers which int2ptr casts we have already warned about.
pub(crate) int2ptr_warned: RefCell<FxHashSet<Span>>,
/// Cache for `mangle_internal_symbol`.
pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,
@ -777,9 +769,8 @@ impl<'tcx> MiriMachine<'tcx> {
local_crates,
extern_statics: FxHashMap::default(),
rng: RefCell::new(rng),
allocator: if !config.native_lib.is_empty() {
Some(Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new())))
} else { None },
allocator: (!config.native_lib.is_empty())
.then(|| Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new()))),
tracked_alloc_ids: config.tracked_alloc_ids.clone(),
track_alloc_accesses: config.track_alloc_accesses,
check_alignment: config.check_alignment,
@ -827,10 +818,6 @@ impl<'tcx> MiriMachine<'tcx> {
pthread_mutex_sanity: Cell::new(false),
pthread_rwlock_sanity: Cell::new(false),
pthread_condvar_sanity: Cell::new(false),
sb_extern_type_warned: Cell::new(false),
native_call_mem_warned: Cell::new(false),
reject_in_isolation_warned: Default::default(),
int2ptr_warned: Default::default(),
mangle_internal_symbol_cache: Default::default(),
force_intrinsic_fallback: config.force_intrinsic_fallback,
float_nondet: config.float_nondet,
@ -920,7 +907,7 @@ impl<'tcx> MiriMachine<'tcx> {
&ecx.machine.threads,
size,
kind,
ecx.machine.current_span(),
ecx.machine.current_user_relevant_span(),
),
data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
),
@ -944,7 +931,7 @@ impl<'tcx> MiriMachine<'tcx> {
ecx.machine
.allocation_spans
.borrow_mut()
.insert(id, (ecx.machine.current_span(), None));
.insert(id, (ecx.machine.current_user_relevant_span(), None));
}
interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() })
@ -1004,10 +991,6 @@ impl VisitProvenance for MiriMachine<'_> {
pthread_mutex_sanity: _,
pthread_rwlock_sanity: _,
pthread_condvar_sanity: _,
sb_extern_type_warned: _,
native_call_mem_warned: _,
reject_in_isolation_warned: _,
int2ptr_warned: _,
mangle_internal_symbol_cache: _,
force_intrinsic_fallback: _,
float_nondet: _,
@ -1182,7 +1165,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
ecx: &mut MiriInterpCx<'tcx>,
instance: ty::Instance<'tcx>,
abi: &FnAbi<'tcx, Ty<'tcx>>,
args: &[FnArg<'tcx, Provenance>],
args: &[FnArg<'tcx>],
dest: &PlaceTy<'tcx>,
ret: Option<mir::BasicBlock>,
unwind: mir::UnwindAction,
@ -1201,6 +1184,13 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
}
if ecx.machine.data_race.as_genmc_ref().is_some()
&& ecx.genmc_intercept_function(instance, args, dest)?
{
ecx.return_to_block(ret)?;
return interp_ok(None);
}
// Otherwise, load the MIR.
let _trace = enter_trace_span!("load_mir");
interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
@ -1211,7 +1201,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
ecx: &mut MiriInterpCx<'tcx>,
fn_val: DynSym,
abi: &FnAbi<'tcx, Ty<'tcx>>,
args: &[FnArg<'tcx, Provenance>],
args: &[FnArg<'tcx>],
dest: &PlaceTy<'tcx>,
ret: Option<mir::BasicBlock>,
unwind: mir::UnwindAction,
@ -1567,7 +1557,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
}
if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
{
*deallocated_at = Some(machine.current_span());
*deallocated_at = Some(machine.current_user_relevant_span());
}
machine.free_alloc_id(alloc_id, size, align, kind);
interp_ok(())

View file

@ -16,6 +16,7 @@ use rustc_target::callconv::FnAbi;
use super::alloc::EvalContextExt as _;
use super::backtrace::EvalContextExt as _;
use crate::concurrency::GenmcEvalContextExt as _;
use crate::helpers::EvalContextExt as _;
use crate::*;
@ -485,6 +486,17 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
// GenMC mode: Assume statements block the current thread when their condition is false.
"miri_genmc_assume" => {
let [condition] =
this.check_shim_sig_lenient(abi, CanonAbi::Rust, link_name, args)?;
if this.machine.data_race.as_genmc_ref().is_some() {
this.handle_genmc_verifier_assume(condition)?;
} else {
throw_unsup_format!("miri_genmc_assume is only supported in GenMC mode")
}
}
// Aborting the process.
"exit" => {
let [code] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
@ -815,6 +827,23 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.mem_copy(ptr_src, ptr_dest, Size::from_bytes(n), true)?;
this.write_pointer(ptr_dest, dest)?;
}
"memset" => {
let [ptr_dest, val, n] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let ptr_dest = this.read_pointer(ptr_dest)?;
let val = this.read_scalar(val)?.to_i32()?;
let n = this.read_target_usize(n)?;
// The docs say val is "interpreted as unsigned char".
#[expect(clippy::as_conversions)]
let val = val as u8;
// C requires that this must always be a valid pointer, even if `n` is zero, so we better check that.
this.ptr_get_alloc_id(ptr_dest, 0)?;
let bytes = std::iter::repeat_n(val, n.try_into().unwrap());
this.write_bytes_ptr(ptr_dest, bytes)?;
this.write_pointer(ptr_dest, dest)?;
}
// LLVM intrinsics
"llvm.prefetch" => {

View file

@ -1,11 +1,13 @@
//! Implements calling functions from a native library.
use std::ops::Deref;
use std::sync::atomic::AtomicBool;
use libffi::low::CodePtr;
use libffi::middle::Type as FfiType;
use rustc_abi::{HasDataLayout, Size};
use rustc_middle::ty::{self as ty, IntTy, Ty, UintTy};
use rustc_middle::ty::layout::HasTypingEnv;
use rustc_middle::ty::{self, IntTy, Ty, UintTy};
use rustc_span::Symbol;
use serde::{Deserialize, Serialize};
@ -219,11 +221,9 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// so we cannot assume 1 access = 1 allocation. :(
let mut rg = evt_rg.addr..evt_rg.end();
while let Some(curr) = rg.next() {
let Some(alloc_id) = this.alloc_id_from_addr(
curr.to_u64(),
rg.len().try_into().unwrap(),
/* only_exposed_allocations */ true,
) else {
let Some(alloc_id) =
this.alloc_id_from_addr(curr.to_u64(), rg.len().try_into().unwrap())
else {
throw_ub_format!("Foreign code did an out-of-bounds access!")
};
let alloc = this.get_alloc_raw(alloc_id)?;
@ -281,8 +281,8 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Helper to print a warning when a pointer is shared with the native code.
let expose = |prov: Provenance| -> InterpResult<'tcx> {
// The first time this happens, print a warning.
if !this.machine.native_call_mem_warned.replace(true) {
static DEDUP: AtomicBool = AtomicBool::new(false);
if !DEDUP.swap(true, std::sync::atomic::Ordering::Relaxed) {
// Newly set, so first time we get here.
this.emit_diagnostic(NonHaltingDiagnostic::NativeCallSharedMem { tracing });
}
@ -374,15 +374,13 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
adt_def: ty::AdtDef<'tcx>,
args: &'tcx ty::List<ty::GenericArg<'tcx>>,
) -> InterpResult<'tcx, FfiType> {
// TODO: Certain non-C reprs should be okay also.
if !adt_def.repr().c() {
throw_unsup_format!("passing a non-#[repr(C)] struct over FFI: {orig_ty}")
}
// TODO: unions, etc.
if !adt_def.is_struct() {
throw_unsup_format!(
"unsupported argument type for native call: {orig_ty} is an enum or union"
);
throw_unsup_format!("passing an enum or union over FFI: {orig_ty}");
}
// TODO: Certain non-C reprs should be okay also.
if !adt_def.repr().c() {
throw_unsup_format!("passing a non-#[repr(C)] {} over FFI: {orig_ty}", adt_def.descr())
}
let this = self.eval_context_ref();
@ -396,19 +394,24 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// Gets the matching libffi type for a given Ty.
fn ty_to_ffitype(&self, ty: Ty<'tcx>) -> InterpResult<'tcx, FfiType> {
let this = self.eval_context_ref();
interp_ok(match ty.kind() {
ty::Int(IntTy::I8) => FfiType::i8(),
ty::Int(IntTy::I16) => FfiType::i16(),
ty::Int(IntTy::I32) => FfiType::i32(),
ty::Int(IntTy::I64) => FfiType::i64(),
ty::Int(IntTy::Isize) => FfiType::isize(),
// the uints
ty::Uint(UintTy::U8) => FfiType::u8(),
ty::Uint(UintTy::U16) => FfiType::u16(),
ty::Uint(UintTy::U32) => FfiType::u32(),
ty::Uint(UintTy::U64) => FfiType::u64(),
ty::Uint(UintTy::Usize) => FfiType::usize(),
ty::RawPtr(..) => FfiType::pointer(),
ty::RawPtr(pointee_ty, _mut) => {
if !pointee_ty.is_sized(*this.tcx, this.typing_env()) {
throw_unsup_format!("passing a pointer to an unsized type over FFI: {}", ty);
}
FfiType::pointer()
}
ty::Adt(adt_def, args) => self.adt_to_ffitype(ty, *adt_def, args)?,
_ => throw_unsup_format!("unsupported argument type for native call: {}", ty),
})

View file

@ -0,0 +1,8 @@
use std::ptr;
// null is explicitly called out as UB in the C docs for `memset`.
fn main() {
unsafe {
libc::memset(ptr::null_mut(), 0, 0); //~ERROR: null pointer
}
}

View file

@ -0,0 +1,15 @@
error: Undefined Behavior: pointer not dereferenceable: pointer must point to some allocation, but got null pointer
--> tests/fail-dep/libc/memset_null.rs:LL:CC
|
LL | libc::memset(ptr::null_mut(), 0, 0);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `main` at tests/fail-dep/libc/memset_null.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -2,7 +2,7 @@
//@[tree]compile-flags: -Zmiri-tree-borrows
use std::mem;
pub fn safe(x: &mut i32, y: &mut i32) {
fn safe(x: &mut i32, y: &mut i32) {
//~[stack]^ ERROR: protect
*x = 1; //~[tree] ERROR: /write access through .* is forbidden/
*y = 2;

View file

@ -1,8 +1,8 @@
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected
--> tests/fail/both_borrows/aliasing_mut1.rs:LL:CC
|
LL | pub fn safe(x: &mut i32, y: &mut i32) {
| ^ Undefined Behavior occurred here
LL | fn safe(x: &mut i32, y: &mut i32) {
| ^ Undefined Behavior occurred here
|
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@ -14,8 +14,8 @@ LL | let xraw: *mut i32 = unsafe { mem::transmute(&mut x) };
help: <TAG> is this argument
--> tests/fail/both_borrows/aliasing_mut1.rs:LL:CC
|
LL | pub fn safe(x: &mut i32, y: &mut i32) {
| ^
LL | fn safe(x: &mut i32, y: &mut i32) {
| ^
= note: BACKTRACE (of the first span):
= note: inside `safe` at tests/fail/both_borrows/aliasing_mut1.rs:LL:CC
note: inside `main`

View file

@ -10,13 +10,13 @@ LL | *x = 1;
help: the accessed tag <TAG> was created here, in the initial state Reserved
--> tests/fail/both_borrows/aliasing_mut1.rs:LL:CC
|
LL | pub fn safe(x: &mut i32, y: &mut i32) {
| ^
LL | fn safe(x: &mut i32, y: &mut i32) {
| ^
help: the accessed tag <TAG> later transitioned to Reserved (conflicted) due to a reborrow (acting as a foreign read access) at offsets [0x0..0x4]
--> tests/fail/both_borrows/aliasing_mut1.rs:LL:CC
|
LL | pub fn safe(x: &mut i32, y: &mut i32) {
| ^
LL | fn safe(x: &mut i32, y: &mut i32) {
| ^
= help: this transition corresponds to a temporary loss of write permissions until function exit
= note: BACKTRACE (of the first span):
= note: inside `safe` at tests/fail/both_borrows/aliasing_mut1.rs:LL:CC

View file

@ -2,7 +2,7 @@
//@[tree]compile-flags: -Zmiri-tree-borrows
use std::mem;
pub fn safe(x: &i32, y: &mut i32) {
fn safe(x: &i32, y: &mut i32) {
//~[stack]^ ERROR: protect
let _v = *x;
*y = 2; //~[tree] ERROR: /write access through .* is forbidden/

View file

@ -1,8 +1,8 @@
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected
--> tests/fail/both_borrows/aliasing_mut2.rs:LL:CC
|
LL | pub fn safe(x: &i32, y: &mut i32) {
| ^ Undefined Behavior occurred here
LL | fn safe(x: &i32, y: &mut i32) {
| ^ Undefined Behavior occurred here
|
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@ -14,8 +14,8 @@ LL | let xref = &mut x;
help: <TAG> is this argument
--> tests/fail/both_borrows/aliasing_mut2.rs:LL:CC
|
LL | pub fn safe(x: &i32, y: &mut i32) {
| ^
LL | fn safe(x: &i32, y: &mut i32) {
| ^
= note: BACKTRACE (of the first span):
= note: inside `safe` at tests/fail/both_borrows/aliasing_mut2.rs:LL:CC
note: inside `main`

View file

@ -10,8 +10,8 @@ LL | *y = 2;
help: the accessed tag <TAG> was created here, in the initial state Reserved
--> tests/fail/both_borrows/aliasing_mut2.rs:LL:CC
|
LL | pub fn safe(x: &i32, y: &mut i32) {
| ^
LL | fn safe(x: &i32, y: &mut i32) {
| ^
help: the accessed tag <TAG> later transitioned to Reserved (conflicted) due to a foreign read access at offsets [0x0..0x4]
--> tests/fail/both_borrows/aliasing_mut2.rs:LL:CC
|

View file

@ -2,7 +2,7 @@
//@[tree]compile-flags: -Zmiri-tree-borrows
use std::mem;
pub fn safe(x: &mut i32, y: &i32) {
fn safe(x: &mut i32, y: &i32) {
//~[stack]^ ERROR: borrow stack
*x = 1; //~[tree] ERROR: /write access through .* is forbidden/
let _v = *y;

View file

@ -1,8 +1,8 @@
error: Undefined Behavior: trying to retag from <TAG> for SharedReadOnly permission at ALLOC[0x0], but that tag does not exist in the borrow stack for this location
--> tests/fail/both_borrows/aliasing_mut3.rs:LL:CC
|
LL | pub fn safe(x: &mut i32, y: &i32) {
| ^ this error occurs as part of function-entry retag at ALLOC[0x0..0x4]
LL | fn safe(x: &mut i32, y: &i32) {
| ^ this error occurs as part of function-entry retag at ALLOC[0x0..0x4]
|
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information

View file

@ -10,13 +10,13 @@ LL | *x = 1;
help: the accessed tag <TAG> was created here, in the initial state Reserved
--> tests/fail/both_borrows/aliasing_mut3.rs:LL:CC
|
LL | pub fn safe(x: &mut i32, y: &i32) {
| ^
LL | fn safe(x: &mut i32, y: &i32) {
| ^
help: the accessed tag <TAG> later transitioned to Reserved (conflicted) due to a reborrow (acting as a foreign read access) at offsets [0x0..0x4]
--> tests/fail/both_borrows/aliasing_mut3.rs:LL:CC
|
LL | pub fn safe(x: &mut i32, y: &i32) {
| ^
LL | fn safe(x: &mut i32, y: &i32) {
| ^
= help: this transition corresponds to a temporary loss of write permissions until function exit
= note: BACKTRACE (of the first span):
= note: inside `safe` at tests/fail/both_borrows/aliasing_mut3.rs:LL:CC

View file

@ -5,7 +5,7 @@ use std::cell::Cell;
use std::mem;
// Make sure &mut UnsafeCell also is exclusive
pub fn safe(x: &i32, y: &mut Cell<i32>) {
fn safe(x: &i32, y: &mut Cell<i32>) {
//~[stack]^ ERROR: protect
y.set(1);
let _load = *x;

View file

@ -1,8 +1,8 @@
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected
--> tests/fail/both_borrows/aliasing_mut4.rs:LL:CC
|
LL | pub fn safe(x: &i32, y: &mut Cell<i32>) {
| ^ Undefined Behavior occurred here
LL | fn safe(x: &i32, y: &mut Cell<i32>) {
| ^ Undefined Behavior occurred here
|
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@ -14,8 +14,8 @@ LL | let xref = &mut x;
help: <TAG> is this argument
--> tests/fail/both_borrows/aliasing_mut4.rs:LL:CC
|
LL | pub fn safe(x: &i32, y: &mut Cell<i32>) {
| ^
LL | fn safe(x: &i32, y: &mut Cell<i32>) {
| ^
= note: BACKTRACE (of the first span):
= note: inside `safe` at tests/fail/both_borrows/aliasing_mut4.rs:LL:CC
note: inside `main`

View file

@ -17,8 +17,8 @@ LL | y.set(1);
help: the protected tag <TAG> was created here, in the initial state Frozen
--> tests/fail/both_borrows/aliasing_mut4.rs:LL:CC
|
LL | pub fn safe(x: &i32, y: &mut Cell<i32>) {
| ^
LL | fn safe(x: &i32, y: &mut Cell<i32>) {
| ^
= note: BACKTRACE (of the first span):
= note: inside `std::mem::replace::<i32>` at RUSTLIB/core/src/mem/mod.rs:LL:CC
= note: inside `std::cell::Cell::<i32>::replace` at RUSTLIB/core/src/cell.rs:LL:CC

View file

@ -12,7 +12,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer = AtomicPtr::new(null_mut::<MaybeUninit<usize>>());
let ptr = EvilSend(&pointer as *const AtomicPtr<MaybeUninit<usize>>);

View file

@ -11,7 +11,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer = AtomicPtr::new(null_mut::<usize>());
let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

View file

@ -10,7 +10,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
let mut a = AtomicUsize::new(0);
let b = &mut a as *mut AtomicUsize;
let c = EvilSend(b);

View file

@ -10,7 +10,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
let mut a = AtomicUsize::new(0);
let b = &mut a as *mut AtomicUsize;
let c = EvilSend(b);

View file

@ -10,7 +10,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
let mut a = AtomicUsize::new(0);
let b = &mut a as *mut AtomicUsize;
let c = EvilSend(b);

View file

@ -10,7 +10,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
let mut a = AtomicUsize::new(0);
let b = &mut a as *mut AtomicUsize;
let c = EvilSend(b);

View file

@ -10,7 +10,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
let mut a = AtomicUsize::new(0);
let b = &mut a as *mut AtomicUsize;
let c = EvilSend(b);

View file

@ -10,7 +10,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
let mut a = AtomicUsize::new(0);
let b = &mut a as *mut AtomicUsize;
let c = EvilSend(b);

View file

@ -16,7 +16,7 @@ extern "Rust" {
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer: *mut usize = Box::into_raw(Box::new(0usize));
let ptr = EvilSend(pointer);

View file

@ -16,7 +16,7 @@ extern "Rust" {
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer: *mut usize = Box::into_raw(Box::new(0usize));
let ptr = EvilSend(pointer);

View file

@ -12,7 +12,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer = AtomicPtr::new(null_mut::<usize>());
let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

View file

@ -15,7 +15,7 @@ extern "Rust" {
#[rustc_std_internal_symbol]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer: *mut usize = Box::into_raw(Box::new(0usize));
let ptr = EvilSend(pointer);

View file

@ -15,7 +15,7 @@ extern "Rust" {
#[rustc_std_internal_symbol]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer: *mut usize = Box::into_raw(Box::new(0usize));
let ptr = EvilSend(pointer);

View file

@ -12,7 +12,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer = AtomicPtr::new(null_mut::<usize>());
let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

View file

@ -9,7 +9,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
// Enable and then join with multiple threads.
let t1 = spawn(|| ());
let t2 = spawn(|| ());

View file

@ -9,7 +9,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
let mut a = 0u32;
let b = &mut a as *mut u32;
let c = EvilSend(b);

View file

@ -12,7 +12,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer = AtomicPtr::new(null_mut::<usize>());
let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

View file

@ -12,7 +12,7 @@ unsafe impl<T> Sync for EvilSend<T> {}
static SYNC: AtomicUsize = AtomicUsize::new(0);
pub fn main() {
fn main() {
let mut a = 0u32;
let b = &mut a as *mut u32;
let c = EvilSend(b);

View file

@ -13,7 +13,7 @@ unsafe impl<T> Sync for EvilSend<T> {}
static SYNC: AtomicUsize = AtomicUsize::new(0);
pub fn main() {
fn main() {
let mut a = 0u32;
let b = &mut a as *mut u32;
let c = EvilSend(b);

View file

@ -12,7 +12,7 @@ unsafe impl<T> Sync for EvilSend<T> {}
static SYNC: AtomicUsize = AtomicUsize::new(0);
pub fn main() {
fn main() {
let mut a = 0u32;
let b = &mut a as *mut u32;
let c = EvilSend(b);

View file

@ -12,7 +12,7 @@ unsafe impl<T> Sync for EvilSend<T> {}
static SYNC: AtomicUsize = AtomicUsize::new(0);
pub fn main() {
fn main() {
let mut a = 0u32;
let b = &mut a as *mut u32;
let c = EvilSend(b);

View file

@ -9,7 +9,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
let mut a = 0u32;
let b = &mut a as *mut u32;
let c = EvilSend(b);

View file

@ -12,7 +12,7 @@ struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
fn main() {
// Shared atomic pointer
let pointer = AtomicPtr::new(null_mut::<usize>());
let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

View file

@ -25,7 +25,7 @@ fn set_discriminant(ptr: &mut Option<NonZero<i32>>) {
}
}
pub fn main() {
fn main() {
let mut v = None;
set_discriminant(&mut v);
}

View file

@ -6,7 +6,6 @@
//@compile-flags: -Zmiri-disable-validation
#![feature(custom_mir, core_intrinsics)]
#![allow(unused)]
use std::intrinsics::mir::*;
@ -31,7 +30,8 @@ fn main() {
}
}
pub fn callee(x: S, mut y: S) {
#[expect(unused_variables, unused_assignments)]
fn callee(x: S, mut y: S) {
// With the setup above, if `x` and `y` are both moved,
// then writing to `y` will change the value stored in `x`!
y.0 = 0;

View file

@ -29,6 +29,6 @@ fn main() {
}
}
pub fn callee(x: S) -> S {
fn callee(x: S) -> S {
x
}

View file

@ -22,7 +22,7 @@ fn main() {
}
}
pub fn callee(x: S, ptr: *mut S) {
fn callee(x: S, ptr: *mut S) {
// With the setup above, if `x` is indeed moved in
// (i.e. we actually just get a pointer to the underlying storage),
// then writing to `ptr` will change the value stored in `x`!

View file

@ -23,6 +23,6 @@ fn main() {
}
#[expect(unused_variables, unused_assignments)]
pub fn change_arg(mut x: S) {
fn change_arg(mut x: S) {
x.0 = 0;
}

View file

@ -24,7 +24,7 @@ fn main() {
}
#[expect(unused_variables, unused_assignments)]
pub fn change_arg(mut x: S, ptr: *mut S) {
fn change_arg(mut x: S, ptr: *mut S) {
x.0 = 0;
// If `x` got passed in-place, we'd see the write through `ptr`!
// Make sure we are not allowed to do that read.

View file

@ -7,7 +7,7 @@
use std::intrinsics::mir::*;
#[custom_mir(dialect = "runtime", phase = "optimized")]
pub fn main() {
fn main() {
mir! {
{
let _x = 0;

View file

@ -7,7 +7,7 @@
use std::intrinsics::mir::*;
#[custom_mir(dialect = "runtime", phase = "optimized")]
pub fn main() {
fn main() {
mir! {
{
let _x = 0;

View file

@ -9,7 +9,7 @@
use std::intrinsics::mir::*;
#[custom_mir(dialect = "runtime", phase = "optimized")]
pub fn main() {
fn main() {
mir! {
{
let _x = 0;

View file

@ -9,7 +9,7 @@ pub unsafe extern "C" fn foo(_y: f32, x: __m256) -> __m256 {
x
}
pub fn bar(x: __m256) -> __m256 {
fn bar(x: __m256) -> __m256 {
// The first and second argument get mixed up here since caller
// and callee do not have the same feature flags.
// In Miri, we don't have a concept of "dynamically available feature flags",

View file

@ -1,6 +1,6 @@
#![feature(core_intrinsics)]
pub fn main() {
fn main() {
unsafe {
use std::intrinsics::*;

View file

@ -1,6 +1,6 @@
#![feature(core_intrinsics)]
pub fn main() {
fn main() {
unsafe {
use std::intrinsics::*;

View file

@ -11,7 +11,7 @@ pub struct Meta {
}
impl Meta {
pub fn new() -> Self {
fn new() -> Self {
Meta { drop_fn: |_| {}, size: 0, align: 1 }
}
}

View file

@ -7,7 +7,7 @@ use std::intrinsics::mir::*;
// which wants to prevent overlapping assignments...
// So we use two separate pointer arguments, and then arrange for them to alias.
#[custom_mir(dialect = "runtime", phase = "optimized")]
pub fn self_copy(ptr1: *mut [i32; 4], ptr2: *mut [i32; 4]) {
fn self_copy(ptr1: *mut [i32; 4], ptr2: *mut [i32; 4]) {
mir! {
{
*ptr1 = *ptr2; //~ERROR: overlapping ranges
@ -16,7 +16,7 @@ pub fn self_copy(ptr1: *mut [i32; 4], ptr2: *mut [i32; 4]) {
}
}
pub fn main() {
fn main() {
let mut x = [0; 4];
let ptr = std::ptr::addr_of_mut!(x);
self_copy(ptr, ptr);

View file

@ -16,7 +16,7 @@ LL | panic::catch_unwind(move || unsafe { init(argc, argv, sigpipe) }).map_e
help: the protected tag <TAG> was created here, in the initial state Active
--> RUSTLIB/std/src/panic.rs:LL:CC
|
LL | pub fn catch_unwind<F: FnOnce() -> R + UnwindSafe, R>(f: F) -> Result<R> {
LL | fn catch_unwind<F: FnOnce() -> R + UnwindSafe, R>(f: F) -> Result<R> {
| ^
= note: BACKTRACE (of the first span):
= note: inside `std::rt::lang_start_internal` at RUSTLIB/std/src/rt.rs:LL:CC

View file

@ -2,7 +2,7 @@
use std::cell::Cell;
pub fn main() {
fn main() {
thread_local! {
static TLS: Cell<Option<&'static i32>> = Cell::new(None);
}

View file

@ -6,7 +6,7 @@ use std::cell::Cell;
/// Ensure that leaks through `thread_local` statics *not in the main thread*
/// are detected.
pub fn main() {
fn main() {
#[thread_local]
static TLS: Cell<Option<&'static i32>> = Cell::new(None);

Some files were not shown because too many files have changed in this diff Show more