Implement std::sync::Mutex interception in GenMC mode.
This commit is contained in:
parent
de97b43432
commit
4203fe84fb
27 changed files with 838 additions and 122 deletions
|
|
@ -233,6 +233,7 @@ fn compile_cpp_dependencies(genmc_path: &Path, always_configure: bool) {
|
|||
let cpp_files = [
|
||||
"MiriInterface/EventHandling.cpp",
|
||||
"MiriInterface/Exploration.cpp",
|
||||
"MiriInterface/Mutex.cpp",
|
||||
"MiriInterface/Setup.cpp",
|
||||
"MiriInterface/ThreadManagement.cpp",
|
||||
]
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@
|
|||
|
||||
// GenMC headers:
|
||||
#include "ExecutionGraph/EventLabel.hpp"
|
||||
#include "Static/ModuleID.hpp"
|
||||
#include "Support/MemOrdering.hpp"
|
||||
#include "Support/RMWOps.hpp"
|
||||
#include "Verification/Config.hpp"
|
||||
|
|
@ -36,6 +35,7 @@ struct LoadResult;
|
|||
struct StoreResult;
|
||||
struct ReadModifyWriteResult;
|
||||
struct CompareExchangeResult;
|
||||
struct MutexLockResult;
|
||||
|
||||
// GenMC uses `int` for its thread IDs.
|
||||
using ThreadId = int;
|
||||
|
|
@ -136,10 +136,13 @@ struct MiriGenmcShim : private GenMCDriver {
|
|||
|
||||
/**** Blocking instructions ****/
|
||||
/// Inform GenMC that the thread should be blocked.
|
||||
/// Note: this function is currently hardcoded for `AssumeType::User`, corresponding to user
|
||||
/// supplied assume statements. This can become a parameter once more types of assumes are
|
||||
/// added.
|
||||
void handle_assume_block(ThreadId thread_id);
|
||||
void handle_assume_block(ThreadId thread_id, AssumeType assume_type);
|
||||
|
||||
/**** Mutex handling ****/
|
||||
auto handle_mutex_lock(ThreadId thread_id, uint64_t address, uint64_t size) -> MutexLockResult;
|
||||
auto handle_mutex_try_lock(ThreadId thread_id, uint64_t address, uint64_t size)
|
||||
-> MutexLockResult;
|
||||
auto handle_mutex_unlock(ThreadId thread_id, uint64_t address, uint64_t size) -> StoreResult;
|
||||
|
||||
/***** Exploration related functionality *****/
|
||||
|
||||
|
|
@ -358,4 +361,22 @@ inline CompareExchangeResult from_error(std::unique_ptr<std::string> error) {
|
|||
}
|
||||
} // namespace CompareExchangeResultExt
|
||||
|
||||
namespace MutexLockResultExt {
|
||||
inline MutexLockResult ok(bool is_lock_acquired) {
|
||||
return MutexLockResult { /* error: */ nullptr, /* is_reset: */ false, is_lock_acquired };
|
||||
}
|
||||
|
||||
inline MutexLockResult reset() {
|
||||
return MutexLockResult { /* error: */ nullptr,
|
||||
/* is_reset: */ true,
|
||||
/* is_lock_acquired: */ false };
|
||||
}
|
||||
|
||||
inline MutexLockResult from_error(std::unique_ptr<std::string> error) {
|
||||
return MutexLockResult { /* error: */ std::move(error),
|
||||
/* is_reset: */ false,
|
||||
/* is_lock_acquired: */ false };
|
||||
}
|
||||
} // namespace MutexLockResultExt
|
||||
|
||||
#endif /* GENMC_MIRI_INTERFACE_HPP */
|
||||
|
|
|
|||
|
|
@ -32,8 +32,9 @@
|
|||
|
||||
/**** Blocking instructions ****/
|
||||
|
||||
void MiriGenmcShim::handle_assume_block(ThreadId thread_id) {
|
||||
GenMCDriver::handleAssume(inc_pos(thread_id), AssumeType::User);
|
||||
void MiriGenmcShim::handle_assume_block(ThreadId thread_id, AssumeType assume_type) {
|
||||
BUG_ON(getExec().getGraph().isThreadBlocked(thread_id));
|
||||
GenMCDriver::handleAssume(inc_pos(thread_id), assume_type);
|
||||
}
|
||||
|
||||
/**** Memory access handling ****/
|
||||
|
|
@ -59,6 +60,7 @@ void MiriGenmcShim::handle_assume_block(ThreadId thread_id) {
|
|||
if (const auto* err = std::get_if<VerificationError>(&ret))
|
||||
return LoadResultExt::from_error(format_error(*err));
|
||||
const auto* ret_val = std::get_if<SVal>(&ret);
|
||||
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
|
||||
if (ret_val == nullptr)
|
||||
ERROR("Unimplemented: load returned unexpected result.");
|
||||
return LoadResultExt::from_value(*ret_val);
|
||||
|
|
@ -88,6 +90,7 @@ void MiriGenmcShim::handle_assume_block(ThreadId thread_id) {
|
|||
return StoreResultExt::from_error(format_error(*err));
|
||||
|
||||
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&ret);
|
||||
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
|
||||
ERROR_ON(
|
||||
nullptr == is_coherence_order_maximal_write,
|
||||
"Unimplemented: Store returned unexpected result."
|
||||
|
|
@ -130,6 +133,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
|
|||
return ReadModifyWriteResultExt::from_error(format_error(*err));
|
||||
|
||||
const auto* ret_val = std::get_if<SVal>(&load_ret);
|
||||
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
|
||||
if (nullptr == ret_val) {
|
||||
ERROR("Unimplemented: read-modify-write returned unexpected result.");
|
||||
}
|
||||
|
|
@ -151,6 +155,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
|
|||
return ReadModifyWriteResultExt::from_error(format_error(*err));
|
||||
|
||||
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
|
||||
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
|
||||
ERROR_ON(
|
||||
nullptr == is_coherence_order_maximal_write,
|
||||
"Unimplemented: RMW store returned unexpected result."
|
||||
|
|
@ -195,6 +200,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
|
|||
if (const auto* err = std::get_if<VerificationError>(&load_ret))
|
||||
return CompareExchangeResultExt::from_error(format_error(*err));
|
||||
const auto* ret_val = std::get_if<SVal>(&load_ret);
|
||||
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
|
||||
ERROR_ON(nullptr == ret_val, "Unimplemented: load returned unexpected result.");
|
||||
const auto read_old_val = *ret_val;
|
||||
if (read_old_val != expectedVal)
|
||||
|
|
@ -215,6 +221,7 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
|
|||
if (const auto* err = std::get_if<VerificationError>(&store_ret))
|
||||
return CompareExchangeResultExt::from_error(format_error(*err));
|
||||
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
|
||||
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
|
||||
ERROR_ON(
|
||||
nullptr == is_coherence_order_maximal_write,
|
||||
"Unimplemented: compare-exchange store returned unexpected result."
|
||||
|
|
|
|||
159
src/tools/miri/genmc-sys/cpp/src/MiriInterface/Mutex.cpp
Normal file
159
src/tools/miri/genmc-sys/cpp/src/MiriInterface/Mutex.cpp
Normal file
|
|
@ -0,0 +1,159 @@
|
|||
/** This file contains functionality related to handling mutexes. */
|
||||
|
||||
#include "MiriInterface.hpp"
|
||||
|
||||
// GenMC headers:
|
||||
#include "Static/ModuleID.hpp"
|
||||
|
||||
// CXX.rs generated headers:
|
||||
#include "genmc-sys/src/lib.rs.h"
|
||||
|
||||
#define MUTEX_UNLOCKED SVal(0)
|
||||
#define MUTEX_LOCKED SVal(1)
|
||||
|
||||
auto MiriGenmcShim::handle_mutex_lock(ThreadId thread_id, uint64_t address, uint64_t size)
|
||||
-> MutexLockResult {
|
||||
// This annotation informs GenMC about the condition required to make this lock call succeed.
|
||||
// It stands for `value_read_by_load != MUTEX_LOCKED`.
|
||||
const auto size_bits = size * 8;
|
||||
const auto annot = std::move(Annotation(
|
||||
AssumeType::Spinloop,
|
||||
Annotation::ExprVP(
|
||||
NeExpr<ModuleID::ID>::create(
|
||||
// `RegisterExpr` marks the value of the current expression, i.e., the loaded value.
|
||||
// The `id` is ignored by GenMC; it is only used by the LLI frontend to substitute
|
||||
// other variables from previous expressions that may be used here.
|
||||
RegisterExpr<ModuleID::ID>::create(size_bits, /* id */ 0),
|
||||
ConcreteExpr<ModuleID::ID>::create(size_bits, MUTEX_LOCKED)
|
||||
)
|
||||
.release()
|
||||
)
|
||||
));
|
||||
|
||||
// As usual, we need to tell GenMC which value was stored at this location before this atomic
|
||||
// access, if there previously was a non-atomic initializing access. We set the initial state of
|
||||
// a mutex to be "unlocked".
|
||||
const auto old_val = MUTEX_UNLOCKED;
|
||||
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::LockCasRead>(
|
||||
thread_id,
|
||||
old_val,
|
||||
address,
|
||||
size,
|
||||
annot,
|
||||
EventDeps()
|
||||
);
|
||||
if (const auto* err = std::get_if<VerificationError>(&load_ret))
|
||||
return MutexLockResultExt::from_error(format_error(*err));
|
||||
// If we get a `Reset`, GenMC decided that this lock operation should not yet run, since it
|
||||
// would not acquire the mutex. Like the handling of the case further down where we read a `1`
|
||||
// ("Mutex already locked"), Miri should call the handle function again once the current thread
|
||||
// is scheduled by GenMC the next time.
|
||||
if (std::holds_alternative<Reset>(load_ret))
|
||||
return MutexLockResultExt::reset();
|
||||
|
||||
const auto* ret_val = std::get_if<SVal>(&load_ret);
|
||||
ERROR_ON(!ret_val, "Unimplemented: mutex lock returned unexpected result.");
|
||||
ERROR_ON(
|
||||
*ret_val != MUTEX_UNLOCKED && *ret_val != MUTEX_LOCKED,
|
||||
"Mutex read value was neither 0 nor 1"
|
||||
);
|
||||
const bool is_lock_acquired = *ret_val == MUTEX_UNLOCKED;
|
||||
if (is_lock_acquired) {
|
||||
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::LockCasWrite>(
|
||||
inc_pos(thread_id),
|
||||
old_val,
|
||||
address,
|
||||
size,
|
||||
EventDeps()
|
||||
);
|
||||
if (const auto* err = std::get_if<VerificationError>(&store_ret))
|
||||
return MutexLockResultExt::from_error(format_error(*err));
|
||||
// We don't update Miri's memory for this operation so we don't need to know if the store
|
||||
// was the co-maximal store, but we still check that we at least get a boolean as the result
|
||||
// of the store.
|
||||
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
|
||||
ERROR_ON(
|
||||
nullptr == is_coherence_order_maximal_write,
|
||||
"Unimplemented: store part of mutex try_lock returned unexpected result."
|
||||
);
|
||||
} else {
|
||||
// We did not acquire the mutex, so we tell GenMC to block the thread until we can acquire
|
||||
// it. GenMC determines this based on the annotation we pass with the load further up in
|
||||
// this function, namely when that load will read a value other than `MUTEX_LOCKED`.
|
||||
this->handle_assume_block(thread_id, AssumeType::Spinloop);
|
||||
}
|
||||
return MutexLockResultExt::ok(is_lock_acquired);
|
||||
}
|
||||
|
||||
auto MiriGenmcShim::handle_mutex_try_lock(ThreadId thread_id, uint64_t address, uint64_t size)
|
||||
-> MutexLockResult {
|
||||
auto& currPos = threads_action_[thread_id].event;
|
||||
// As usual, we need to tell GenMC which value was stored at this location before this atomic
|
||||
// access, if there previously was a non-atomic initializing access. We set the initial state of
|
||||
// a mutex to be "unlocked".
|
||||
const auto old_val = MUTEX_UNLOCKED;
|
||||
const auto load_ret = GenMCDriver::handleLoad<EventLabel::EventLabelKind::TrylockCasRead>(
|
||||
++currPos,
|
||||
old_val,
|
||||
SAddr(address),
|
||||
ASize(size)
|
||||
);
|
||||
if (const auto* err = std::get_if<VerificationError>(&load_ret))
|
||||
return MutexLockResultExt::from_error(format_error(*err));
|
||||
const auto* ret_val = std::get_if<SVal>(&load_ret);
|
||||
if (nullptr == ret_val) {
|
||||
ERROR("Unimplemented: mutex trylock load returned unexpected result.");
|
||||
}
|
||||
|
||||
ERROR_ON(
|
||||
*ret_val != MUTEX_UNLOCKED && *ret_val != MUTEX_LOCKED,
|
||||
"Mutex read value was neither 0 nor 1"
|
||||
);
|
||||
const bool is_lock_acquired = *ret_val == MUTEX_UNLOCKED;
|
||||
if (!is_lock_acquired) {
|
||||
return MutexLockResultExt::ok(false); /* Lock already held. */
|
||||
}
|
||||
|
||||
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::TrylockCasWrite>(
|
||||
++currPos,
|
||||
old_val,
|
||||
SAddr(address),
|
||||
ASize(size)
|
||||
);
|
||||
if (const auto* err = std::get_if<VerificationError>(&store_ret))
|
||||
return MutexLockResultExt::from_error(format_error(*err));
|
||||
// We don't update Miri's memory for this operation so we don't need to know if the store was
|
||||
// co-maximal, but we still check that we get a boolean result.
|
||||
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
|
||||
ERROR_ON(
|
||||
nullptr == is_coherence_order_maximal_write,
|
||||
"Unimplemented: store part of mutex try_lock returned unexpected result."
|
||||
);
|
||||
return MutexLockResultExt::ok(true);
|
||||
}
|
||||
|
||||
auto MiriGenmcShim::handle_mutex_unlock(ThreadId thread_id, uint64_t address, uint64_t size)
|
||||
-> StoreResult {
|
||||
const auto pos = inc_pos(thread_id);
|
||||
const auto ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::UnlockWrite>(
|
||||
pos,
|
||||
// As usual, we need to tell GenMC which value was stored at this location before this
|
||||
// atomic access, if there previously was a non-atomic initializing access. We set the
|
||||
// initial state of a mutex to be "unlocked".
|
||||
/* old_val */ MUTEX_UNLOCKED,
|
||||
MemOrdering::Release,
|
||||
SAddr(address),
|
||||
ASize(size),
|
||||
AType::Signed,
|
||||
/* store_value */ MUTEX_UNLOCKED,
|
||||
EventDeps()
|
||||
);
|
||||
if (const auto* err = std::get_if<VerificationError>(&ret))
|
||||
return StoreResultExt::from_error(format_error(*err));
|
||||
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&ret);
|
||||
ERROR_ON(
|
||||
nullptr == is_coherence_order_maximal_write,
|
||||
"Unimplemented: store part of mutex unlock returned unexpected result."
|
||||
);
|
||||
return StoreResultExt::ok(*is_coherence_order_maximal_write);
|
||||
}
|
||||
|
|
@ -38,6 +38,7 @@ void MiriGenmcShim::handle_thread_join(ThreadId thread_id, ThreadId child_id) {
|
|||
if (!std::holds_alternative<SVal>(ret)) {
|
||||
dec_pos(thread_id);
|
||||
}
|
||||
// FIXME(genmc): handle `HandleResult::{Invalid, Reset, VerificationError}` return values.
|
||||
|
||||
// NOTE: Thread return value is ignored, since Miri doesn't need it.
|
||||
}
|
||||
|
|
|
|||
|
|
@ -254,6 +254,17 @@ mod ffi {
|
|||
is_coherence_order_maximal_write: bool,
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[derive(Debug)]
|
||||
struct MutexLockResult {
|
||||
/// If there was an error, it will be stored in `error`, otherwise it is `None`.
|
||||
error: UniquePtr<CxxString>,
|
||||
/// If true, GenMC determined that we should retry the mutex lock operation once the thread attempting to lock is scheduled again.
|
||||
is_reset: bool,
|
||||
/// Indicate whether the lock was acquired by this thread.
|
||||
is_lock_acquired: bool,
|
||||
}
|
||||
|
||||
/**** These are GenMC types that we have to copy-paste here since cxx does not support
|
||||
"importing" externally defined C++ types. ****/
|
||||
|
||||
|
|
@ -305,6 +316,13 @@ mod ffi {
|
|||
UMin = 10,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum AssumeType {
|
||||
User = 0,
|
||||
Barrier = 1,
|
||||
Spinloop = 2,
|
||||
}
|
||||
|
||||
// # Safety
|
||||
//
|
||||
// This block is unsafe to allow defining safe methods inside.
|
||||
|
|
@ -323,6 +341,7 @@ mod ffi {
|
|||
(This tells cxx that the enums defined above are already defined on the C++ side;
|
||||
it will emit assertions to ensure that the two definitions agree.) ****/
|
||||
type ActionKind;
|
||||
type AssumeType;
|
||||
type MemOrdering;
|
||||
type RMWBinOp;
|
||||
type SchedulePolicy;
|
||||
|
|
@ -430,7 +449,31 @@ mod ffi {
|
|||
/// Inform GenMC that the thread should be blocked.
|
||||
/// Note: this function is currently hardcoded for `AssumeType::User`, corresponding to user supplied assume statements.
|
||||
/// This can become a parameter once more types of assumes are added.
|
||||
fn handle_assume_block(self: Pin<&mut MiriGenmcShim>, thread_id: i32);
|
||||
fn handle_assume_block(
|
||||
self: Pin<&mut MiriGenmcShim>,
|
||||
thread_id: i32,
|
||||
assume_type: AssumeType,
|
||||
);
|
||||
|
||||
/**** Mutex handling ****/
|
||||
fn handle_mutex_lock(
|
||||
self: Pin<&mut MiriGenmcShim>,
|
||||
thread_id: i32,
|
||||
address: u64,
|
||||
size: u64,
|
||||
) -> MutexLockResult;
|
||||
fn handle_mutex_try_lock(
|
||||
self: Pin<&mut MiriGenmcShim>,
|
||||
thread_id: i32,
|
||||
address: u64,
|
||||
size: u64,
|
||||
) -> MutexLockResult;
|
||||
fn handle_mutex_unlock(
|
||||
self: Pin<&mut MiriGenmcShim>,
|
||||
thread_id: i32,
|
||||
address: u64,
|
||||
size: u64,
|
||||
) -> StoreResult;
|
||||
|
||||
/***** Exploration related functionality *****/
|
||||
|
||||
|
|
|
|||
|
|
@ -43,6 +43,15 @@ mod intercept {
|
|||
|
||||
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn genmc_intercept_function(
|
||||
&mut self,
|
||||
_instance: rustc_middle::ty::Instance<'tcx>,
|
||||
_args: &[rustc_const_eval::interpret::FnArg<'tcx, crate::Provenance>],
|
||||
_dest: &crate::PlaceTy<'tcx>,
|
||||
) -> InterpResult<'tcx, bool> {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
fn handle_genmc_verifier_assume(&mut self, _condition: &OpTy<'tcx>) -> InterpResult<'tcx> {
|
||||
unreachable!();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,38 +0,0 @@
|
|||
use tracing::debug;
|
||||
|
||||
use crate::concurrency::thread::EvalContextExt as _;
|
||||
use crate::{
|
||||
BlockReason, InterpResult, MachineCallback, MiriInterpCx, OpTy, UnblockKind, VisitProvenance,
|
||||
VisitWith, callback, interp_ok,
|
||||
};
|
||||
|
||||
// Handling of code intercepted by Miri in GenMC mode, such as assume statement or `std::sync::Mutex`.
|
||||
|
||||
/// Other functionality not directly related to event handling
|
||||
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
/// Handle an `assume` statement. This will tell GenMC to block the current thread if the `condition` is false.
|
||||
/// Returns `true` if the current thread should be blocked in Miri too.
|
||||
fn handle_genmc_verifier_assume(&mut self, condition: &OpTy<'tcx>) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
let condition_bool = this.read_scalar(condition)?.to_bool()?;
|
||||
debug!("GenMC: handle_genmc_verifier_assume, condition: {condition:?} = {condition_bool}");
|
||||
if condition_bool {
|
||||
return interp_ok(());
|
||||
}
|
||||
let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
|
||||
genmc_ctx.handle_assume_block(&this.machine)?;
|
||||
this.block_thread(
|
||||
BlockReason::Genmc,
|
||||
None,
|
||||
callback!(
|
||||
@capture<'tcx> {}
|
||||
|_this, unblock: UnblockKind| {
|
||||
assert_eq!(unblock, UnblockKind::Ready);
|
||||
unreachable!("GenMC should never unblock a thread blocked by an `assume`.");
|
||||
}
|
||||
),
|
||||
);
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
|
|
@ -27,16 +27,16 @@ use crate::*;
|
|||
mod config;
|
||||
mod global_allocations;
|
||||
mod helper;
|
||||
mod intercept;
|
||||
mod run;
|
||||
pub(crate) mod scheduling;
|
||||
mod shims;
|
||||
mod thread_id_map;
|
||||
|
||||
pub use genmc_sys::GenmcParams;
|
||||
|
||||
pub use self::config::GenmcConfig;
|
||||
pub use self::intercept::EvalContextExt as GenmcEvalContextExt;
|
||||
pub use self::run::run_genmc_mode;
|
||||
pub use self::shims::EvalContextExt as GenmcEvalContextExt;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ExecutionEndResult {
|
||||
|
|
@ -200,6 +200,14 @@ impl GenmcCtx {
|
|||
fn get_alloc_data_races(&self) -> bool {
|
||||
self.exec_state.allow_data_races.get()
|
||||
}
|
||||
|
||||
/// Get the GenMC id of the currently active thread.
|
||||
#[must_use]
|
||||
fn active_thread_genmc_tid<'tcx>(&self, machine: &MiriMachine<'tcx>) -> i32 {
|
||||
let thread_infos = self.exec_state.thread_id_manager.borrow();
|
||||
let curr_thread = machine.threads.active_thread();
|
||||
thread_infos.get_genmc_tid(curr_thread)
|
||||
}
|
||||
}
|
||||
|
||||
/// GenMC event handling. These methods are used to inform GenMC about events happening in the program, and to handle scheduling decisions.
|
||||
|
|
@ -309,12 +317,10 @@ impl GenmcCtx {
|
|||
ordering: AtomicFenceOrd,
|
||||
) -> InterpResult<'tcx> {
|
||||
assert!(!self.get_alloc_data_races(), "atomic fence with data race checking disabled.");
|
||||
|
||||
let thread_infos = self.exec_state.thread_id_manager.borrow();
|
||||
let curr_thread = machine.threads.active_thread();
|
||||
let genmc_tid = thread_infos.get_genmc_tid(curr_thread);
|
||||
|
||||
self.handle.borrow_mut().pin_mut().handle_fence(genmc_tid, ordering.to_genmc());
|
||||
self.handle
|
||||
.borrow_mut()
|
||||
.pin_mut()
|
||||
.handle_fence(self.active_thread_genmc_tid(machine), ordering.to_genmc());
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
|
|
@ -425,12 +431,8 @@ impl GenmcCtx {
|
|||
debug!(
|
||||
"GenMC: atomic_compare_exchange, address: {address:?}, size: {size:?} (expect: {expected_old_value:?}, new: {new_value:?}, old_value: {old_value:?}, {success:?}, orderings: {fail:?}), can fail spuriously: {can_fail_spuriously}"
|
||||
);
|
||||
|
||||
let thread_infos = self.exec_state.thread_id_manager.borrow();
|
||||
let genmc_tid = thread_infos.get_genmc_tid(ecx.machine.threads.active_thread());
|
||||
|
||||
let cas_result = self.handle.borrow_mut().pin_mut().handle_compare_exchange(
|
||||
genmc_tid,
|
||||
self.active_thread_genmc_tid(&ecx.machine),
|
||||
address.bytes(),
|
||||
size.bytes(),
|
||||
scalar_to_genmc_scalar(ecx, self, expected_old_value)?,
|
||||
|
|
@ -591,14 +593,10 @@ impl GenmcCtx {
|
|||
return ecx
|
||||
.get_global_allocation_address(&self.global_state.global_allocations, alloc_id);
|
||||
}
|
||||
let thread_infos = self.exec_state.thread_id_manager.borrow();
|
||||
let curr_thread = machine.threads.active_thread();
|
||||
let genmc_tid = thread_infos.get_genmc_tid(curr_thread);
|
||||
// GenMC doesn't support ZSTs, so we set the minimum size to 1 byte
|
||||
let genmc_size = size.bytes().max(1);
|
||||
|
||||
let chosen_address = self.handle.borrow_mut().pin_mut().handle_malloc(
|
||||
genmc_tid,
|
||||
self.active_thread_genmc_tid(machine),
|
||||
genmc_size,
|
||||
alignment.bytes(),
|
||||
);
|
||||
|
|
@ -632,11 +630,12 @@ impl GenmcCtx {
|
|||
!self.get_alloc_data_races(),
|
||||
"memory deallocation with data race checking disabled."
|
||||
);
|
||||
let thread_infos = self.exec_state.thread_id_manager.borrow();
|
||||
let curr_thread = machine.threads.active_thread();
|
||||
let genmc_tid = thread_infos.get_genmc_tid(curr_thread);
|
||||
|
||||
if self.handle.borrow_mut().pin_mut().handle_free(genmc_tid, address.bytes()) {
|
||||
if self
|
||||
.handle
|
||||
.borrow_mut()
|
||||
.pin_mut()
|
||||
.handle_free(self.active_thread_genmc_tid(machine), address.bytes())
|
||||
{
|
||||
// FIXME(genmc): improve error handling.
|
||||
// An error was detected, so we get the error string from GenMC.
|
||||
throw_ub_format!("{}", self.try_get_error().unwrap());
|
||||
|
|
@ -690,7 +689,7 @@ impl GenmcCtx {
|
|||
let genmc_tid = thread_infos.get_genmc_tid(curr_thread_id);
|
||||
|
||||
debug!("GenMC: thread {curr_thread_id:?} ({genmc_tid:?}) finished.");
|
||||
// NOTE: Miri doesn't support return values for threads, but GenMC expects one, so we return 0
|
||||
// NOTE: Miri doesn't support return values for threads, but GenMC expects one, so we return 0.
|
||||
self.handle.borrow_mut().pin_mut().handle_thread_finish(genmc_tid, /* ret_val */ 0);
|
||||
}
|
||||
|
||||
|
|
@ -752,17 +751,12 @@ impl GenmcCtx {
|
|||
"GenMC mode currently does not support atomics larger than {MAX_ACCESS_SIZE} bytes.",
|
||||
);
|
||||
}
|
||||
let thread_infos = self.exec_state.thread_id_manager.borrow();
|
||||
let curr_thread_id = machine.threads.active_thread();
|
||||
let genmc_tid = thread_infos.get_genmc_tid(curr_thread_id);
|
||||
|
||||
debug!(
|
||||
"GenMC: load, thread: {curr_thread_id:?} ({genmc_tid:?}), address: {addr} == {addr:#x}, size: {size:?}, ordering: {memory_ordering:?}, old_value: {genmc_old_value:x?}",
|
||||
"GenMC: load, address: {addr} == {addr:#x}, size: {size:?}, ordering: {memory_ordering:?}, old_value: {genmc_old_value:x?}",
|
||||
addr = address.bytes()
|
||||
);
|
||||
|
||||
let load_result = self.handle.borrow_mut().pin_mut().handle_load(
|
||||
genmc_tid,
|
||||
self.active_thread_genmc_tid(machine),
|
||||
address.bytes(),
|
||||
size.bytes(),
|
||||
memory_ordering,
|
||||
|
|
@ -803,17 +797,12 @@ impl GenmcCtx {
|
|||
"GenMC mode currently does not support atomics larger than {MAX_ACCESS_SIZE} bytes."
|
||||
);
|
||||
}
|
||||
let thread_infos = self.exec_state.thread_id_manager.borrow();
|
||||
let curr_thread_id = machine.threads.active_thread();
|
||||
let genmc_tid = thread_infos.get_genmc_tid(curr_thread_id);
|
||||
|
||||
debug!(
|
||||
"GenMC: store, thread: {curr_thread_id:?} ({genmc_tid:?}), address: {addr} = {addr:#x}, size: {size:?}, ordering {memory_ordering:?}, value: {genmc_value:?}",
|
||||
"GenMC: store, address: {addr} = {addr:#x}, size: {size:?}, ordering {memory_ordering:?}, value: {genmc_value:?}",
|
||||
addr = address.bytes()
|
||||
);
|
||||
|
||||
let store_result = self.handle.borrow_mut().pin_mut().handle_store(
|
||||
genmc_tid,
|
||||
self.active_thread_genmc_tid(machine),
|
||||
address.bytes(),
|
||||
size.bytes(),
|
||||
genmc_value,
|
||||
|
|
@ -854,14 +843,11 @@ impl GenmcCtx {
|
|||
MAX_ACCESS_SIZE,
|
||||
size.bytes()
|
||||
);
|
||||
|
||||
let curr_thread_id = ecx.machine.threads.active_thread();
|
||||
let genmc_tid = self.exec_state.thread_id_manager.borrow().get_genmc_tid(curr_thread_id);
|
||||
debug!(
|
||||
"GenMC: atomic_rmw_op, thread: {curr_thread_id:?} ({genmc_tid:?}) (op: {genmc_rmw_op:?}, rhs value: {genmc_rhs_scalar:?}), address: {address:?}, size: {size:?}, ordering: {ordering:?}",
|
||||
"GenMC: atomic_rmw_op (op: {genmc_rmw_op:?}, rhs value: {genmc_rhs_scalar:?}), address: {address:?}, size: {size:?}, ordering: {ordering:?}",
|
||||
);
|
||||
let rmw_result = self.handle.borrow_mut().pin_mut().handle_read_modify_write(
|
||||
genmc_tid,
|
||||
self.active_thread_genmc_tid(&ecx.machine),
|
||||
address.bytes(),
|
||||
size.bytes(),
|
||||
genmc_rmw_op,
|
||||
|
|
@ -884,20 +870,6 @@ impl GenmcCtx {
|
|||
};
|
||||
interp_ok((old_value_scalar, new_value_scalar))
|
||||
}
|
||||
|
||||
/**** Blocking functionality ****/
|
||||
|
||||
/// Handle a user thread getting blocked.
|
||||
/// This may happen due to an manual `assume` statement added by a user
|
||||
/// or added by some automated program transformation, e.g., for spinloops.
|
||||
fn handle_assume_block<'tcx>(&self, machine: &MiriMachine<'tcx>) -> InterpResult<'tcx> {
|
||||
let curr_thread = machine.threads.active_thread();
|
||||
let genmc_curr_thread =
|
||||
self.exec_state.thread_id_manager.borrow().get_genmc_tid(curr_thread);
|
||||
debug!("GenMC: assume statement, blocking thread {curr_thread:?} ({genmc_curr_thread:?})");
|
||||
self.handle.borrow_mut().pin_mut().handle_assume_block(genmc_curr_thread);
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl VisitProvenance for GenmcCtx {
|
||||
|
|
|
|||
|
|
@ -63,15 +63,25 @@ fn get_function_kind<'tcx>(
|
|||
) -> InterpResult<'tcx, NextInstrKind> {
|
||||
use NextInstrKind::*;
|
||||
let callee_def_id = match func_ty.kind() {
|
||||
ty::FnDef(def_id, _args) => def_id,
|
||||
ty::FnDef(def_id, _args) => *def_id,
|
||||
_ => return interp_ok(MaybeAtomic(ActionKind::Load)), // we don't know the callee, might be pthread_join
|
||||
};
|
||||
let Some(intrinsic_def) = ecx.tcx.intrinsic(callee_def_id) else {
|
||||
if ecx.tcx.is_foreign_item(*callee_def_id) {
|
||||
if ecx.tcx.is_foreign_item(callee_def_id) {
|
||||
// Some shims, like pthread_join, must be considered loads. So just consider them all loads,
|
||||
// these calls are not *that* common.
|
||||
return interp_ok(MaybeAtomic(ActionKind::Load));
|
||||
}
|
||||
// NOTE: Functions intercepted by Miri in `concurrency/genmc/intercep.rs` must also be added here.
|
||||
// Such intercepted functions, like `sys::Mutex::lock`, should be treated as atomics to ensure we call the scheduler when we encounter one of them.
|
||||
// These functions must also be classified whether they may have load semantics.
|
||||
if ecx.tcx.is_diagnostic_item(rustc_span::sym::sys_mutex_lock, callee_def_id)
|
||||
|| ecx.tcx.is_diagnostic_item(rustc_span::sym::sys_mutex_try_lock, callee_def_id)
|
||||
{
|
||||
return interp_ok(MaybeAtomic(ActionKind::Load));
|
||||
} else if ecx.tcx.is_diagnostic_item(rustc_span::sym::sys_mutex_unlock, callee_def_id) {
|
||||
return interp_ok(MaybeAtomic(ActionKind::NonLoad));
|
||||
}
|
||||
// The next step is a call to a regular Rust function.
|
||||
return interp_ok(NonAtomic);
|
||||
};
|
||||
|
|
|
|||
234
src/tools/miri/src/concurrency/genmc/shims.rs
Normal file
234
src/tools/miri/src/concurrency/genmc/shims.rs
Normal file
|
|
@ -0,0 +1,234 @@
|
|||
use genmc_sys::AssumeType;
|
||||
use rustc_middle::ty;
|
||||
use tracing::debug;
|
||||
|
||||
use crate::concurrency::genmc::MAX_ACCESS_SIZE;
|
||||
use crate::concurrency::thread::EvalContextExt as _;
|
||||
use crate::*;
|
||||
|
||||
impl GenmcCtx {
|
||||
/// Handle a user thread getting blocked.
|
||||
/// This may happen due to an manual `assume` statement added by a user
|
||||
/// or added by some automated program transformation, e.g., for spinloops.
|
||||
fn handle_assume_block<'tcx>(
|
||||
&self,
|
||||
machine: &MiriMachine<'tcx>,
|
||||
assume_type: AssumeType,
|
||||
) -> InterpResult<'tcx> {
|
||||
debug!("GenMC: assume statement, blocking active thread.");
|
||||
self.handle
|
||||
.borrow_mut()
|
||||
.pin_mut()
|
||||
.handle_assume_block(self.active_thread_genmc_tid(machine), assume_type);
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Handling of code intercepted by Miri in GenMC mode, such as assume statement or `std::sync::Mutex`.
|
||||
|
||||
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
/// Small helper to get the arguments of an intercepted function call.
|
||||
fn get_fn_args<const N: usize>(
|
||||
&self,
|
||||
instance: ty::Instance<'tcx>,
|
||||
args: &[FnArg<'tcx>],
|
||||
) -> InterpResult<'tcx, [OpTy<'tcx>; N]> {
|
||||
let this = self.eval_context_ref();
|
||||
let args = this.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
|
||||
if let Ok(ops) = args.try_into() {
|
||||
return interp_ok(ops);
|
||||
}
|
||||
panic!("{} is a diagnostic item expected to have {} arguments", instance, N);
|
||||
}
|
||||
|
||||
/**** Blocking functionality ****/
|
||||
|
||||
/// Handle a thread getting blocked by a user assume (not an automatically generated assume).
|
||||
/// Unblocking this thread in the current execution will cause a panic.
|
||||
/// Miri does not provide GenMC with the annotations to determine when to unblock the thread, so it should never be unblocked.
|
||||
fn handle_user_assume_block(&mut self) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
debug!(
|
||||
"GenMC: block thread {:?} due to failing assume statement.",
|
||||
this.machine.threads.active_thread()
|
||||
);
|
||||
assert!(this.machine.threads.active_thread_ref().is_enabled());
|
||||
// Block the thread on the GenMC side.
|
||||
let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
|
||||
genmc_ctx.handle_assume_block(&this.machine, AssumeType::User)?;
|
||||
// Block the thread on the Miri side.
|
||||
this.block_thread(
|
||||
BlockReason::Genmc,
|
||||
None,
|
||||
callback!(
|
||||
@capture<'tcx> {}
|
||||
|_this, unblock: UnblockKind| {
|
||||
assert_eq!(unblock, UnblockKind::Ready);
|
||||
unreachable!("GenMC should never unblock a thread blocked by an `assume`.");
|
||||
}
|
||||
),
|
||||
);
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
fn intercept_mutex_lock(&mut self, mutex: MPlaceTy<'tcx>) -> InterpResult<'tcx> {
|
||||
debug!("GenMC: handling Mutex::lock()");
|
||||
let this = self.eval_context_mut();
|
||||
let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
|
||||
|
||||
let size = mutex.layout.size.bytes();
|
||||
assert!(
|
||||
size <= MAX_ACCESS_SIZE,
|
||||
"Mutex is larger than maximal size of a memory access supported by GenMC ({size} > {MAX_ACCESS_SIZE})"
|
||||
);
|
||||
let result = genmc_ctx.handle.borrow_mut().pin_mut().handle_mutex_lock(
|
||||
genmc_ctx.active_thread_genmc_tid(&this.machine),
|
||||
mutex.ptr().addr().bytes(),
|
||||
size,
|
||||
);
|
||||
if let Some(error) = result.error.as_ref() {
|
||||
// FIXME(genmc): improve error handling.
|
||||
throw_ub_format!("{}", error.to_string_lossy());
|
||||
}
|
||||
if result.is_reset {
|
||||
debug!("GenMC: Mutex::lock: Reset");
|
||||
// GenMC informed us to reset and try the lock again later.
|
||||
// We block the current thread until GenMC schedules it again.
|
||||
this.block_thread(
|
||||
crate::BlockReason::Genmc,
|
||||
None,
|
||||
crate::callback!(
|
||||
@capture<'tcx> {
|
||||
mutex: MPlaceTy<'tcx>,
|
||||
}
|
||||
|this, unblock: crate::UnblockKind| {
|
||||
debug!("GenMC: Mutex::lock: unblocking callback called, attempting to lock the Mutex again.");
|
||||
assert_eq!(unblock, crate::UnblockKind::Ready);
|
||||
this.intercept_mutex_lock(mutex)?;
|
||||
interp_ok(())
|
||||
}
|
||||
),
|
||||
);
|
||||
} else if result.is_lock_acquired {
|
||||
debug!("GenMC: Mutex::lock successfully acquired the Mutex.");
|
||||
} else {
|
||||
debug!("GenMC: Mutex::lock failed to acquire the Mutex, permanently blocking thread.");
|
||||
// NOTE: `handle_mutex_lock` already blocked the current thread on the GenMC side.
|
||||
this.block_thread(
|
||||
crate::BlockReason::Genmc,
|
||||
None,
|
||||
crate::callback!(
|
||||
@capture<'tcx> {
|
||||
mutex: MPlaceTy<'tcx>,
|
||||
}
|
||||
|_this, _unblock: crate::UnblockKind| {
|
||||
unreachable!("A thread blocked on `Mutex::lock` should not be unblocked again.");
|
||||
}
|
||||
),
|
||||
);
|
||||
}
|
||||
// NOTE: We don't write anything back to Miri's memory where the Mutex is located, that state is handled only by GenMC.
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
fn intercept_mutex_try_lock(
|
||||
&mut self,
|
||||
mutex: MPlaceTy<'tcx>,
|
||||
dest: &crate::PlaceTy<'tcx>,
|
||||
) -> InterpResult<'tcx> {
|
||||
debug!("GenMC: handling Mutex::try_lock()");
|
||||
let this = self.eval_context_mut();
|
||||
let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
|
||||
let size = mutex.layout.size.bytes();
|
||||
assert!(
|
||||
size <= MAX_ACCESS_SIZE,
|
||||
"Mutex is larger than maximal size of a memory access supported by GenMC ({size} > {MAX_ACCESS_SIZE})"
|
||||
);
|
||||
let result = genmc_ctx.handle.borrow_mut().pin_mut().handle_mutex_try_lock(
|
||||
genmc_ctx.active_thread_genmc_tid(&this.machine),
|
||||
mutex.ptr().addr().bytes(),
|
||||
size,
|
||||
);
|
||||
if let Some(error) = result.error.as_ref() {
|
||||
// FIXME(genmc): improve error handling.
|
||||
throw_ub_format!("{}", error.to_string_lossy());
|
||||
}
|
||||
debug!(
|
||||
"GenMC: Mutex::try_lock(): is_reset: {}, is_lock_acquired: {}",
|
||||
result.is_reset, result.is_lock_acquired
|
||||
);
|
||||
assert!(!result.is_reset, "GenMC returned 'reset' for a mutex try_lock.");
|
||||
// Write the return value of try_lock, i.e., whether we acquired the mutex.
|
||||
this.write_scalar(Scalar::from_bool(result.is_lock_acquired), dest)?;
|
||||
// NOTE: We don't write anything back to Miri's memory where the Mutex is located, that state is handled only by GenMC.
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
fn intercept_mutex_unlock(&self, mutex: MPlaceTy<'tcx>) -> InterpResult<'tcx> {
|
||||
debug!("GenMC: handling Mutex::unlock()");
|
||||
let this = self.eval_context_ref();
|
||||
let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
|
||||
let result = genmc_ctx.handle.borrow_mut().pin_mut().handle_mutex_unlock(
|
||||
genmc_ctx.active_thread_genmc_tid(&this.machine),
|
||||
mutex.ptr().addr().bytes(),
|
||||
mutex.layout.size.bytes(),
|
||||
);
|
||||
if let Some(error) = result.error.as_ref() {
|
||||
// FIXME(genmc): improve error handling.
|
||||
throw_ub_format!("{}", error.to_string_lossy());
|
||||
}
|
||||
// NOTE: We don't write anything back to Miri's memory where the Mutex is located, that state is handled only by GenMC.}
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
/// Given a `ty::Instance<'tcx>`, do any required special handling.
|
||||
/// Returns true if this `instance` should be skipped (i.e., no MIR should be executed for it).
|
||||
fn genmc_intercept_function(
|
||||
&mut self,
|
||||
instance: rustc_middle::ty::Instance<'tcx>,
|
||||
args: &[rustc_const_eval::interpret::FnArg<'tcx, crate::Provenance>],
|
||||
dest: &crate::PlaceTy<'tcx>,
|
||||
) -> InterpResult<'tcx, bool> {
|
||||
let this = self.eval_context_mut();
|
||||
assert!(
|
||||
this.machine.data_race.as_genmc_ref().is_some(),
|
||||
"This function should only be called in GenMC mode."
|
||||
);
|
||||
|
||||
// NOTE: When adding new intercepted functions here, they must also be added to `fn get_function_kind` in `concurrency/genmc/scheduling.rs`.
|
||||
use rustc_span::sym;
|
||||
if this.tcx.is_diagnostic_item(sym::sys_mutex_lock, instance.def_id()) {
|
||||
let [mutex] = this.get_fn_args(instance, args)?;
|
||||
let mutex = this.deref_pointer(&mutex)?;
|
||||
this.intercept_mutex_lock(mutex)?;
|
||||
} else if this.tcx.is_diagnostic_item(sym::sys_mutex_try_lock, instance.def_id()) {
|
||||
let [mutex] = this.get_fn_args(instance, args)?;
|
||||
let mutex = this.deref_pointer(&mutex)?;
|
||||
this.intercept_mutex_try_lock(mutex, dest)?;
|
||||
} else if this.tcx.is_diagnostic_item(sym::sys_mutex_unlock, instance.def_id()) {
|
||||
let [mutex] = this.get_fn_args(instance, args)?;
|
||||
let mutex = this.deref_pointer(&mutex)?;
|
||||
this.intercept_mutex_unlock(mutex)?;
|
||||
} else {
|
||||
// Nothing to intercept.
|
||||
return interp_ok(false);
|
||||
}
|
||||
interp_ok(true)
|
||||
}
|
||||
|
||||
/// Handle an `assume` statement. This will tell GenMC to block the current thread if the `condition` is false.
|
||||
/// Returns `true` if the current thread should be blocked in Miri too.
|
||||
fn handle_genmc_verifier_assume(&mut self, condition: &OpTy<'tcx>) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
let condition_bool = this.read_scalar(condition)?.to_bool()?;
|
||||
debug!("GenMC: handle_genmc_verifier_assume, condition: {condition:?} = {condition_bool}");
|
||||
if condition_bool {
|
||||
return interp_ok(());
|
||||
}
|
||||
this.handle_user_assume_block()
|
||||
}
|
||||
}
|
||||
|
|
@ -708,21 +708,31 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
|
|||
let this = self.eval_context_mut();
|
||||
|
||||
// In GenMC mode, we let GenMC do the scheduling.
|
||||
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
|
||||
let Some(next_thread_id) = genmc_ctx.schedule_thread(this)? else {
|
||||
return interp_ok(SchedulingAction::ExecuteStep);
|
||||
};
|
||||
// If a thread is blocked on GenMC, we have to implicitly unblock it when it gets scheduled again.
|
||||
if this.machine.threads.threads[next_thread_id].state.is_blocked_on(BlockReason::Genmc)
|
||||
{
|
||||
info!("GenMC: scheduling blocked thread {next_thread_id:?}, so we unblock it now.");
|
||||
this.unblock_thread(next_thread_id, BlockReason::Genmc)?;
|
||||
if this.machine.data_race.as_genmc_ref().is_some() {
|
||||
loop {
|
||||
let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
|
||||
let Some(next_thread_id) = genmc_ctx.schedule_thread(this)? else {
|
||||
return interp_ok(SchedulingAction::ExecuteStep);
|
||||
};
|
||||
// If a thread is blocked on GenMC, we have to implicitly unblock it when it gets scheduled again.
|
||||
if this.machine.threads.threads[next_thread_id]
|
||||
.state
|
||||
.is_blocked_on(BlockReason::Genmc)
|
||||
{
|
||||
info!(
|
||||
"GenMC: scheduling blocked thread {next_thread_id:?}, so we unblock it now."
|
||||
);
|
||||
this.unblock_thread(next_thread_id, BlockReason::Genmc)?;
|
||||
}
|
||||
// The thread we just unblocked may have been blocked again during the unblocking callback.
|
||||
// In that case, we need to ask for a different thread to run next.
|
||||
let thread_manager = &mut this.machine.threads;
|
||||
if thread_manager.threads[next_thread_id].state.is_enabled() {
|
||||
// Set the new active thread.
|
||||
thread_manager.active_thread = next_thread_id;
|
||||
return interp_ok(SchedulingAction::ExecuteStep);
|
||||
}
|
||||
}
|
||||
// Set the new active thread.
|
||||
let thread_manager = &mut this.machine.threads;
|
||||
thread_manager.active_thread = next_thread_id;
|
||||
assert!(thread_manager.threads[thread_manager.active_thread].state.is_enabled());
|
||||
return interp_ok(SchedulingAction::ExecuteStep);
|
||||
}
|
||||
|
||||
// We are not in GenMC mode, so we control the scheduling.
|
||||
|
|
|
|||
|
|
@ -110,6 +110,7 @@ pub type StrictPointer = interpret::Pointer<machine::Provenance>;
|
|||
pub type Scalar = interpret::Scalar<machine::Provenance>;
|
||||
pub type ImmTy<'tcx> = interpret::ImmTy<'tcx, machine::Provenance>;
|
||||
pub type OpTy<'tcx> = interpret::OpTy<'tcx, machine::Provenance>;
|
||||
pub type FnArg<'tcx> = interpret::FnArg<'tcx, machine::Provenance>;
|
||||
pub type PlaceTy<'tcx> = interpret::PlaceTy<'tcx, machine::Provenance>;
|
||||
pub type MPlaceTy<'tcx> = interpret::MPlaceTy<'tcx, machine::Provenance>;
|
||||
|
||||
|
|
|
|||
|
|
@ -31,7 +31,9 @@ use rustc_target::callconv::FnAbi;
|
|||
use crate::alloc_addresses::EvalContextExt;
|
||||
use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
|
||||
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
|
||||
use crate::concurrency::{AllocDataRaceHandler, GenmcCtx, GlobalDataRaceHandler, weak_memory};
|
||||
use crate::concurrency::{
|
||||
AllocDataRaceHandler, GenmcCtx, GenmcEvalContextExt as _, GlobalDataRaceHandler, weak_memory,
|
||||
};
|
||||
use crate::*;
|
||||
|
||||
/// First real-time signal.
|
||||
|
|
@ -1163,7 +1165,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
|
|||
ecx: &mut MiriInterpCx<'tcx>,
|
||||
instance: ty::Instance<'tcx>,
|
||||
abi: &FnAbi<'tcx, Ty<'tcx>>,
|
||||
args: &[FnArg<'tcx, Provenance>],
|
||||
args: &[FnArg<'tcx>],
|
||||
dest: &PlaceTy<'tcx>,
|
||||
ret: Option<mir::BasicBlock>,
|
||||
unwind: mir::UnwindAction,
|
||||
|
|
@ -1182,6 +1184,13 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
|
|||
return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
|
||||
}
|
||||
|
||||
if ecx.machine.data_race.as_genmc_ref().is_some()
|
||||
&& ecx.genmc_intercept_function(instance, args, dest)?
|
||||
{
|
||||
ecx.return_to_block(ret)?;
|
||||
return interp_ok(None);
|
||||
}
|
||||
|
||||
// Otherwise, load the MIR.
|
||||
let _trace = enter_trace_span!("load_mir");
|
||||
interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
|
||||
|
|
@ -1192,7 +1201,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
|
|||
ecx: &mut MiriInterpCx<'tcx>,
|
||||
fn_val: DynSym,
|
||||
abi: &FnAbi<'tcx, Ty<'tcx>>,
|
||||
args: &[FnArg<'tcx, Provenance>],
|
||||
args: &[FnArg<'tcx>],
|
||||
dest: &PlaceTy<'tcx>,
|
||||
ret: Option<mir::BasicBlock>,
|
||||
unwind: mir::UnwindAction,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,28 @@
|
|||
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
|
||||
//@error-in-other-file: Undefined Behavior
|
||||
|
||||
// Test that GenMC throws an error if a `std::sync::Mutex` is unlocked from a different thread than the one that locked it.
|
||||
//
|
||||
// This test will cause an error on all targets, even mutexes on that targets allow for unlocking on a different thread.
|
||||
// GenMC always assumes a `pthread`-like API.
|
||||
|
||||
#![no_main]
|
||||
|
||||
use std::sync::Mutex;
|
||||
|
||||
static MUTEX: Mutex<u64> = Mutex::new(0);
|
||||
|
||||
// Wrapper used to move a value into another thread even when its type is
// `!Send` (here: a `MutexGuard`), so the test can unlock from a non-owning thread.
#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);
// SAFETY: deliberately unsound — this is exactly the point of the test.
unsafe impl<T> Send for EvilSend<T> {}
|
||||
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // Lock on the main thread, then smuggle the guard into a spawned thread
    // via `EvilSend`; dropping it there performs the cross-thread unlock that
    // GenMC should flag.
    let guard = EvilSend(MUTEX.lock().unwrap());
    let handle = std::thread::spawn(move || {
        let guard = guard; // avoid field capturing
        drop(guard);
    });
    handle.join().unwrap();
    0
}
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
Running GenMC Verification...
|
||||
warning: GenMC currently does not model the failure ordering for `compare_exchange`. Due to success ordering 'Acquire', the failure ordering 'Relaxed' is treated like 'Acquire'. Miri with GenMC might miss bugs related to this memory access.
|
||||
--> RUSTLIB/std/src/sys/sync/PLATFORM/futex.rs:LL:CC
|
||||
|
|
||||
LL | || self
|
||||
| ________________^
|
||||
LL | | .state
|
||||
LL | | .compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed)
|
||||
| |____________________________________________________________________________________^ GenMC might miss possible behaviors of this code
|
||||
|
|
||||
= note: BACKTRACE:
|
||||
= note: inside `std::sys::sync::PLATFORM::futex::RwLock::read` at RUSTLIB/std/src/sys/sync/PLATFORM/futex.rs:LL:CC
|
||||
= note: inside `std::sync::RwLock::<()>::read` at RUSTLIB/std/src/sync/poison/rwlock.rs:LL:CC
|
||||
= note: inside `std::sys::env::PLATFORM::env_read_lock` at RUSTLIB/std/src/sys/env/PLATFORM.rs:LL:CC
|
||||
= note: inside closure at RUSTLIB/std/src/sys/env/PLATFORM.rs:LL:CC
|
||||
= note: inside `std::sys::pal::PLATFORM::small_c_string::run_with_cstr_stack::<std::option::Option<std::ffi::OsString>>` at RUSTLIB/std/src/sys/pal/PLATFORM/small_c_string.rs:LL:CC
|
||||
= note: inside `std::sys::pal::PLATFORM::small_c_string::run_with_cstr::<std::option::Option<std::ffi::OsString>>` at RUSTLIB/std/src/sys/pal/PLATFORM/small_c_string.rs:LL:CC
|
||||
= note: inside `std::sys::env::PLATFORM::getenv` at RUSTLIB/std/src/sys/env/PLATFORM.rs:LL:CC
|
||||
= note: inside `std::env::_var_os` at RUSTLIB/std/src/env.rs:LL:CC
|
||||
= note: inside `std::env::var_os::<&str>` at RUSTLIB/std/src/env.rs:LL:CC
|
||||
= note: inside closure at RUSTLIB/std/src/thread/mod.rs:LL:CC
|
||||
note: inside `miri_start`
|
||||
--> tests/genmc/fail/shims/mutex_diff_thread_unlock.rs:LL:CC
|
||||
|
|
||||
LL | let handle = std::thread::spawn(move || {
|
||||
| __________________^
|
||||
LL | | let guard = guard; // avoid field capturing
|
||||
LL | | drop(guard);
|
||||
LL | | });
|
||||
| |______^
|
||||
|
||||
warning: GenMC currently does not model spurious failures of `compare_exchange_weak`. Miri with GenMC might miss bugs related to spurious failures.
|
||||
--> RUSTLIB/std/src/sys/sync/PLATFORM/futex.rs:LL:CC
|
||||
|
|
||||
LL | || self
|
||||
| ________________^
|
||||
LL | | .state
|
||||
LL | | .compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed)
|
||||
| |____________________________________________________________________________________^ GenMC might miss possible behaviors of this code
|
||||
|
|
||||
= note: BACKTRACE:
|
||||
= note: inside `std::sys::sync::PLATFORM::futex::RwLock::read` at RUSTLIB/std/src/sys/sync/PLATFORM/futex.rs:LL:CC
|
||||
= note: inside `std::sync::RwLock::<()>::read` at RUSTLIB/std/src/sync/poison/rwlock.rs:LL:CC
|
||||
= note: inside `std::sys::env::PLATFORM::env_read_lock` at RUSTLIB/std/src/sys/env/PLATFORM.rs:LL:CC
|
||||
= note: inside closure at RUSTLIB/std/src/sys/env/PLATFORM.rs:LL:CC
|
||||
= note: inside `std::sys::pal::PLATFORM::small_c_string::run_with_cstr_stack::<std::option::Option<std::ffi::OsString>>` at RUSTLIB/std/src/sys/pal/PLATFORM/small_c_string.rs:LL:CC
|
||||
= note: inside `std::sys::pal::PLATFORM::small_c_string::run_with_cstr::<std::option::Option<std::ffi::OsString>>` at RUSTLIB/std/src/sys/pal/PLATFORM/small_c_string.rs:LL:CC
|
||||
= note: inside `std::sys::env::PLATFORM::getenv` at RUSTLIB/std/src/sys/env/PLATFORM.rs:LL:CC
|
||||
= note: inside `std::env::_var_os` at RUSTLIB/std/src/env.rs:LL:CC
|
||||
= note: inside `std::env::var_os::<&str>` at RUSTLIB/std/src/env.rs:LL:CC
|
||||
= note: inside closure at RUSTLIB/std/src/thread/mod.rs:LL:CC
|
||||
note: inside `miri_start`
|
||||
--> tests/genmc/fail/shims/mutex_diff_thread_unlock.rs:LL:CC
|
||||
|
|
||||
LL | let handle = std::thread::spawn(move || {
|
||||
| __________________^
|
||||
LL | | let guard = guard; // avoid field capturing
|
||||
LL | | drop(guard);
|
||||
LL | | });
|
||||
| |______^
|
||||
|
||||
error: Undefined Behavior: Invalid unlock() operation
|
||||
--> RUSTLIB/std/src/sync/poison/mutex.rs:LL:CC
|
||||
|
|
||||
LL | self.lock.inner.unlock();
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
||||
|
|
||||
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
|
||||
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
|
||||
= note: BACKTRACE on thread `unnamed-ID`:
|
||||
= note: inside `<std::sync::MutexGuard<'_, u64> as std::ops::Drop>::drop` at RUSTLIB/std/src/sync/poison/mutex.rs:LL:CC
|
||||
= note: inside `std::ptr::drop_in_place::<std::sync::MutexGuard<'_, u64>> - shim(Some(std::sync::MutexGuard<'_, u64>))` at RUSTLIB/core/src/ptr/mod.rs:LL:CC
|
||||
= note: inside `std::ptr::drop_in_place::<EvilSend<std::sync::MutexGuard<'_, u64>>> - shim(Some(EvilSend<std::sync::MutexGuard<'_, u64>>))` at RUSTLIB/core/src/ptr/mod.rs:LL:CC
|
||||
= note: inside `std::mem::drop::<EvilSend<std::sync::MutexGuard<'_, u64>>>` at RUSTLIB/core/src/mem/mod.rs:LL:CC
|
||||
note: inside closure
|
||||
--> tests/genmc/fail/shims/mutex_diff_thread_unlock.rs:LL:CC
|
||||
|
|
||||
LL | drop(guard);
|
||||
| ^^^^^^^^^^^
|
||||
|
||||
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
|
||||
|
||||
note: add `-Zmiri-genmc-print-genmc-output` to MIRIFLAGS to see the detailed GenMC error report
|
||||
|
||||
error: aborting due to 1 previous error; 2 warnings emitted
|
||||
|
||||
22
src/tools/miri/tests/genmc/fail/shims/mutex_double_unlock.rs
Normal file
22
src/tools/miri/tests/genmc/fail/shims/mutex_double_unlock.rs
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
|
||||
//@error-in-other-file: Undefined Behavior
|
||||
|
||||
// Test that GenMC can detect a double unlock of a mutex.
|
||||
// This test will cause an error even if the program actually would work entirely fine despite the double-unlock
|
||||
// because GenMC always assumes a `pthread`-like API.
|
||||
|
||||
#![no_main]
|
||||
|
||||
use std::sync::Mutex;
|
||||
|
||||
static MUTEX: Mutex<u64> = Mutex::new(0);
|
||||
|
||||
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    let mut guard = MUTEX.lock().unwrap();
    unsafe {
        // First unlock: run the guard's destructor in place without moving it.
        std::ptr::drop_in_place(&raw mut guard);
    }
    // Second unlock: dropping the already-dropped guard unlocks again, which
    // GenMC reports as an invalid unlock() operation.
    drop(guard);
    0
}
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
Running GenMC Verification...
|
||||
error: Undefined Behavior: Invalid unlock() operation
|
||||
--> RUSTLIB/std/src/sync/poison/mutex.rs:LL:CC
|
||||
|
|
||||
LL | self.lock.inner.unlock();
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
||||
|
|
||||
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
|
||||
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
|
||||
= note: BACKTRACE:
|
||||
= note: inside `<std::sync::MutexGuard<'_, u64> as std::ops::Drop>::drop` at RUSTLIB/std/src/sync/poison/mutex.rs:LL:CC
|
||||
= note: inside `std::ptr::drop_in_place::<std::sync::MutexGuard<'_, u64>> - shim(Some(std::sync::MutexGuard<'_, u64>))` at RUSTLIB/core/src/ptr/mod.rs:LL:CC
|
||||
= note: inside `std::mem::drop::<std::sync::MutexGuard<'_, u64>>` at RUSTLIB/core/src/mem/mod.rs:LL:CC
|
||||
note: inside `miri_start`
|
||||
--> tests/genmc/fail/shims/mutex_double_unlock.rs:LL:CC
|
||||
|
|
||||
LL | drop(guard);
|
||||
| ^^^^^^^^^^^
|
||||
|
||||
note: add `-Zmiri-genmc-print-genmc-output` to MIRIFLAGS to see the detailed GenMC error report
|
||||
|
||||
error: aborting due to 1 previous error
|
||||
|
||||
42
src/tools/miri/tests/genmc/pass/shims/mutex_deadlock.rs
Normal file
42
src/tools/miri/tests/genmc/pass/shims/mutex_deadlock.rs
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-genmc-verbose
|
||||
//@normalize-stderr-test: "Verification took .*s" -> "Verification took [TIME]s"
|
||||
|
||||
// Test that we can detect a deadlock involving `std::sync::Mutex` in GenMC mode.
|
||||
// FIXME(genmc): We cannot detect the deadlock currently. Instead, the deadlocked execution is treated like any other blocked execution.
|
||||
// This behavior matches GenMC's on an equivalent program, and additional analysis is required to detect such deadlocks.
|
||||
// This should become a `fail` test once this deadlock can be detected.
|
||||
//
|
||||
// FIXME(genmc): use `std::thread` once GenMC mode performance is better and produces fewer warnings for compare_exchange.
|
||||
|
||||
#![no_main]
|
||||
#![feature(abort_unwind)]
|
||||
|
||||
#[path = "../../../utils/genmc.rs"]
|
||||
mod genmc;
|
||||
|
||||
use std::sync::Mutex;
|
||||
|
||||
use crate::genmc::*;
|
||||
|
||||
static X: Mutex<u64> = Mutex::new(0);
|
||||
static Y: Mutex<u64> = Mutex::new(0);
|
||||
|
||||
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    unsafe {
        // t0 acquires X then Y while t1 acquires Y then X. The reversed lock
        // order admits an interleaving where each thread holds one mutex and
        // waits forever on the other — the blocked execution in the expected output.
        let t0 = spawn_pthread_closure(|| {
            let mut x = X.lock().unwrap();
            let mut y = Y.lock().unwrap();
            *x += 1;
            *y += 1;
        });
        let t1 = spawn_pthread_closure(|| {
            let mut y = Y.lock().unwrap();
            let mut x = X.lock().unwrap();
            *x += 1;
            *y += 1;
        });
        join_pthreads([t0, t1]);
        0
    }
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
Running GenMC Verification...
|
||||
Verification complete with 3 executions. No errors found.
|
||||
Number of complete executions explored: 2
|
||||
Number of blocked executions seen: 1
|
||||
Verification took [TIME]s.
|
||||
68
src/tools/miri/tests/genmc/pass/shims/mutex_simple.rs
Normal file
68
src/tools/miri/tests/genmc/pass/shims/mutex_simple.rs
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-genmc-verbose
|
||||
//@normalize-stderr-test: "Verification took .*s" -> "Verification took [TIME]s"
|
||||
|
||||
// Test various features of the `std::sync::Mutex` API with GenMC.
|
||||
// Miri running with GenMC intercepts the Mutex functions `lock`, `try_lock` and `unlock`, instead of running their actual implementation.
|
||||
// This interception should not break any functionality.
|
||||
//
|
||||
// FIXME(genmc): Once GenMC supports mixed size accesses, add stack/heap allocated Mutexes to the test.
|
||||
// FIXME(genmc): Once the actual implementation of mutexes can be used in GenMC mode and there is a setting to disable Mutex interception: Add test revision without interception.
|
||||
//
|
||||
// Miri provides annotations to GenMC for the condition required to unblock a thread blocked on a Mutex lock call.
|
||||
// This massively reduces the number of blocked executions we need to explore (in this test we require zero blocked execution).
|
||||
// We use verbose output to check that this test always explores zero blocked executions.
|
||||
|
||||
#![no_main]
|
||||
#![feature(abort_unwind)]
|
||||
|
||||
#[path = "../../../utils/genmc.rs"]
|
||||
mod genmc;
|
||||
|
||||
use std::sync::Mutex;
|
||||
|
||||
use crate::genmc::*;
|
||||
|
||||
const REPS: u64 = 3;
|
||||
|
||||
static LOCK: Mutex<u64> = Mutex::new(0);
|
||||
static OTHER_LOCK: Mutex<u64> = Mutex::new(1234);
|
||||
|
||||
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // `abort_unwind` turns any panic (e.g. a failed assert in `main_`) into an
    // abort, since unwinding out of `miri_start` is not supported.
    std::panic::abort_unwind(main_);
    0
}
|
||||
|
||||
fn main_() {
    // Two mutexes should not interfere, holding this guard does not affect the other mutex.
    let other_guard = OTHER_LOCK.lock().unwrap();

    let guard = LOCK.lock().unwrap();
    // Trying to lock should fail if the mutex is already held.
    assert!(LOCK.try_lock().is_err());
    // Dropping the guard should unlock the mutex correctly.
    drop(guard);
    // Trying to lock now should succeed.
    assert!(LOCK.try_lock().is_ok());

    // Spawn multiple threads interacting with the same mutex.
    // Each thread performs REPS locked increments with a distinct step (2 and 4)
    // so the final sum pins down that no increment was lost.
    unsafe {
        let ids = [
            spawn_pthread_closure(|| {
                for _ in 0..REPS {
                    *LOCK.lock().unwrap() += 2;
                }
            }),
            spawn_pthread_closure(|| {
                for _ in 0..REPS {
                    *LOCK.lock().unwrap() += 4;
                }
            }),
        ];
        join_pthreads(ids);
    }
    // Due to the Mutex, all increments should be visible in every explored execution.
    assert!(*LOCK.lock().unwrap() == REPS * 6);

    drop(other_guard);
}
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
Running GenMC Verification...
|
||||
Verification complete with 20 executions. No errors found.
|
||||
Verification took [TIME]s.
|
||||
Loading…
Add table
Add a link
Reference in a new issue