Merge pull request #4506 from Patrick-6/miri-genmc-mvp

Add minimal functionality for using GenMC mode
This commit is contained in:
Ralf Jung 2025-09-03 13:25:37 +00:00 committed by GitHub
commit ddd0cefd38
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
87 changed files with 3668 additions and 482 deletions

View file

@ -363,9 +363,9 @@ dependencies = [
[[package]]
name = "cxx"
version = "1.0.161"
version = "1.0.173"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3523cc02ad831111491dd64b27ad999f1ae189986728e477604e61b81f828df"
checksum = "6c64ed3da1c337cbaae7223cdcff8b4dddf698d188cd3eaddd1116f6b0295950"
dependencies = [
"cc",
"cxxbridge-cmd",
@ -377,9 +377,9 @@ dependencies = [
[[package]]
name = "cxx-build"
version = "1.0.161"
version = "1.0.173"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "212b754247a6f07b10fa626628c157593f0abf640a3dd04cce2760eca970f909"
checksum = "ae0a26a75a05551f5ae3d75b3557543d06682284eaa7419113162d602cb45766"
dependencies = [
"cc",
"codespan-reporting",
@ -392,9 +392,9 @@ dependencies = [
[[package]]
name = "cxxbridge-cmd"
version = "1.0.161"
version = "1.0.173"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f426a20413ec2e742520ba6837c9324b55ffac24ead47491a6e29f933c5b135a"
checksum = "952d408b6002b7db4b36da07c682a9cbb34f2db0efa03e976ae50a388414e16c"
dependencies = [
"clap",
"codespan-reporting",
@ -406,15 +406,15 @@ dependencies = [
[[package]]
name = "cxxbridge-flags"
version = "1.0.161"
version = "1.0.173"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a258b6069020b4e5da6415df94a50ee4f586a6c38b037a180e940a43d06a070d"
checksum = "ccbd201b471c75c6abb6321cace706d1982d270e308b891c11a3262d320f5265"
[[package]]
name = "cxxbridge-macro"
version = "1.0.161"
version = "1.0.173"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8dec184b52be5008d6eaf7e62fc1802caf1ad1227d11b3b7df2c409c7ffc3f4"
checksum = "2bea8b915bbc4cb4288f242aa7ca18b23ecc6965e4d6e7c1b07905e3fe2e0c41"
dependencies = [
"indexmap",
"proc-macro2",
@ -501,9 +501,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "foldhash"
version = "0.1.5"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
[[package]]
name = "form_urlencoded"

View file

@ -70,7 +70,7 @@ harness = false
[features]
default = ["stack-cache", "native-lib"]
genmc = ["dep:genmc-sys"] # this enables a GPL dependency!
genmc = ["dep:genmc-sys"]
stack-cache = []
stack-cache-consistency-check = ["stack-cache"]
tracing = ["serde_json"]

View file

@ -9,9 +9,7 @@ Miri-GenMC integrates that model checker into Miri.
## Usage
**IMPORTANT: The license of GenMC and thus the `genmc-sys` crate in the Miri repo is currently "GPL-3.0-or-later", so a binary produced with the `genmc` feature is subject to the requirements of the GPL. As long as that remains the case, the `genmc` feature of Miri is OFF-BY-DEFAULT and must be OFF for all Miri releases.**
For testing/developing Miri-GenMC (while keeping in mind the licensing issues):
For testing/developing Miri-GenMC:
- clone the Miri repo.
- build Miri-GenMC with `./miri build --features=genmc`.
- OR: install Miri-GenMC in the current system with `./miri install --features=genmc`
@ -21,6 +19,21 @@ Basic usage:
MIRIFLAGS="-Zmiri-genmc" cargo miri run
```
Note that `cargo miri test` in GenMC mode is currently not supported.
### Supported Parameters
- `-Zmiri-genmc`: Enable GenMC mode (not required if any other GenMC options are used).
- `-Zmiri-genmc-log=LOG_LEVEL`: Change the log level for GenMC. Default: `warning`.
- `quiet`: Disable logging.
- `error`: Print errors.
- `warning`: Print errors and warnings.
- `tip`: Print errors, warnings and tips.
- If Miri is built with debug assertions, there are additional log levels available (downgraded to `tip` without debug assertions):
- `debug1`: Print revisits considered by GenMC.
- `debug2`: Print the execution graph after every memory access.
- `debug3`: Print reads-from values considered by GenMC.
<!-- FIXME(genmc): explain options. -->
<!-- FIXME(genmc): explain Miri-GenMC specific functions. -->
@ -57,6 +70,15 @@ The process for obtaining them is as follows:
If you place this directory inside the Miri folder, it is recommended to call it `genmc-src` as that tells `./miri fmt` to avoid
formatting the Rust files inside that folder.
### Formatting the C++ code
For formatting the C++ code we provide a `.clang-format` file in the `genmc-sys` directory.
With `clang-format` installed, run this command to format the C++ files (replace `-i` with `--dry-run` to preview the changes):
```
find ./genmc-sys/cpp/ -name "*.cpp" -o -name "*.hpp" | xargs clang-format --style=file:"./genmc-sys/.clang-format" -i
```
NOTE: this is currently not done automatically on pull requests to Miri.
<!-- FIXME(genmc): explain how submitting code to GenMC should be handled. -->
<!-- FIXME(genmc): explain development. -->

View file

@ -0,0 +1,52 @@
# .clang-format
BasedOnStyle: LLVM
Standard: c++20
ColumnLimit: 100
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AllowShortBlocksOnASingleLine: Empty
BreakBeforeBraces: Attach
BinPackArguments: false
BinPackParameters: false
AllowAllParametersOfDeclarationOnNextLine: false
AlwaysBreakAfterReturnType: None
# Force parameters to break and align
AlignAfterOpenBracket: BlockIndent
AllowAllArgumentsOnNextLine: false
# Spacing around braces and parentheses
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceInEmptyBlock: false
SpacesInContainerLiterals: true
SpacesInParensOptions:
InCStyleCasts: false
InConditionalStatements: false
InEmptyParentheses: false
Other: false
SpacesInSquareBrackets: false
# Brace spacing for initializers
Cpp11BracedListStyle: false
SpaceBeforeCpp11BracedList: true
# Import grouping: group standard, external, and project includes.
IncludeBlocks: Regroup
SortIncludes: true
# Granularity: sort includes per module/file.
IncludeIsMainRegex: '([-_](test|unittest))?$'
# Miscellaneous
SpaceAfterCStyleCast: true
SpaceBeforeParens: ControlStatements
PointerAlignment: Left
IndentCaseLabels: true
IndentWidth: 4
TabWidth: 4
UseTab: Never

View file

@ -1,17 +1,15 @@
[package]
authors = ["Miri Team"]
# The parts in this repo are MIT OR Apache-2.0, but we are linking in
# code from https://github.com/MPI-SWS/genmc which is GPL-3.0-or-later.
license = "(MIT OR Apache-2.0) AND GPL-3.0-or-later"
license = "MIT OR Apache-2.0"
name = "genmc-sys"
version = "0.1.0"
edition = "2024"
[dependencies]
cxx = { version = "1.0.160", features = ["c++20"] }
cxx = { version = "1.0.173", features = ["c++20"] }
[build-dependencies]
cc = "1.2.16"
cmake = "0.1.54"
git2 = { version = "0.20.2", default-features = false, features = ["https"] }
cxx-build = { version = "1.0.160", features = ["parallel"] }
cxx-build = { version = "1.0.173", features = ["parallel"] }

View file

@ -26,9 +26,9 @@ mod downloading {
use super::GENMC_DOWNLOAD_PATH;
/// The GenMC repository that we get our commit from.
pub(crate) const GENMC_GITHUB_URL: &str = "https://github.com/MPI-SWS/genmc.git";
pub(crate) const GENMC_GITHUB_URL: &str = "https://gitlab.inf.ethz.ch/public-plf/genmc.git";
/// The GenMC commit we depend on. It must be available on the specified GenMC repository.
pub(crate) const GENMC_COMMIT: &str = "3438dd2c1202cd4a47ed7881d099abf23e4167ab";
pub(crate) const GENMC_COMMIT: &str = "af9cc9ccd5d412b16defc35dbf36571c63a19c76";
pub(crate) fn download_genmc() -> PathBuf {
let Ok(genmc_download_path) = PathBuf::from_str(GENMC_DOWNLOAD_PATH);
@ -176,6 +176,11 @@ fn link_to_llvm(config_file: &Path) -> (String, String) {
/// Build the GenMC model checker library and the Rust-C++ interop library with cxx.rs
fn compile_cpp_dependencies(genmc_path: &Path) {
// Give each step a separate build directory to prevent interference.
let out_dir = PathBuf::from(std::env::var("OUT_DIR").as_deref().unwrap());
let genmc_build_dir = out_dir.join("genmc");
let interface_build_dir = out_dir.join("miri_genmc");
// Part 1:
// Compile the GenMC library using cmake.
@ -186,8 +191,11 @@ fn compile_cpp_dependencies(genmc_path: &Path) {
let enable_genmc_debug = matches!(std::env::var("PROFILE").as_deref().unwrap(), "debug");
let mut config = cmake::Config::new(cmakelists_path);
config.profile(GENMC_CMAKE_PROFILE);
config.define("GENMC_DEBUG", if enable_genmc_debug { "ON" } else { "OFF" });
config
.always_configure(false) // We don't need to reconfigure on subsequent compilation runs.
.out_dir(genmc_build_dir)
.profile(GENMC_CMAKE_PROFILE)
.define("GENMC_DEBUG", if enable_genmc_debug { "ON" } else { "OFF" });
// The actual compilation happens here:
let genmc_install_dir = config.build();
@ -210,6 +218,13 @@ fn compile_cpp_dependencies(genmc_path: &Path) {
// These definitions are parsed into a cmake list and then printed to the config.h file, so they are ';' separated.
let definitions = llvm_definitions.split(";");
let cpp_files = [
"./cpp/src/MiriInterface/EventHandling.cpp",
"./cpp/src/MiriInterface/Exploration.cpp",
"./cpp/src/MiriInterface/Setup.cpp",
"./cpp/src/MiriInterface/ThreadManagement.cpp",
];
let mut bridge = cxx_build::bridge("src/lib.rs");
// FIXME(genmc,cmake): Remove once the GenMC debug setting is available in the config.h file.
if enable_genmc_debug {
@ -225,9 +240,9 @@ fn compile_cpp_dependencies(genmc_path: &Path) {
.std("c++23")
.include(genmc_include_dir)
.include(llvm_include_dirs)
.include("./src_cpp")
.file("./src_cpp/MiriInterface.hpp")
.file("./src_cpp/MiriInterface.cpp")
.include("./cpp/include")
.files(&cpp_files)
.out_dir(interface_build_dir)
.compile("genmc_interop");
// Link the Rust-C++ interface library generated by cxx_build:
@ -235,15 +250,8 @@ fn compile_cpp_dependencies(genmc_path: &Path) {
}
fn main() {
// Make sure we don't accidentally distribute a binary with GPL code.
if option_env!("RUSTC_STAGE").is_some() {
panic!(
"genmc should not be enabled in the rustc workspace since it includes a GPL dependency"
);
}
// Select which path to use for the GenMC repo:
let genmc_path = if let Ok(genmc_src_path) = std::env::var("GENMC_SRC_PATH") {
let genmc_path = if let Some(genmc_src_path) = option_env!("GENMC_SRC_PATH") {
let genmc_src_path =
PathBuf::from_str(&genmc_src_path).expect("GENMC_SRC_PATH should contain a valid path");
assert!(
@ -251,6 +259,8 @@ fn main() {
"GENMC_SRC_PATH={} does not exist!",
genmc_src_path.display()
);
// Rebuild if files in the given path change.
println!("cargo::rerun-if-changed={}", genmc_src_path.display());
genmc_src_path
} else {
downloading::download_genmc()
@ -265,5 +275,5 @@ fn main() {
// cloned (since cargo detects that as a file modification).
println!("cargo::rerun-if-changed={RUST_CXX_BRIDGE_FILE_PATH}");
println!("cargo::rerun-if-changed=./src");
println!("cargo::rerun-if-changed=./src_cpp");
println!("cargo::rerun-if-changed=./cpp");
}

View file

@ -0,0 +1,274 @@
#ifndef GENMC_MIRI_INTERFACE_HPP
#define GENMC_MIRI_INTERFACE_HPP
// CXX.rs generated headers:
#include "rust/cxx.h"
// GenMC generated headers:
#include "config.h"
// Miri `genmc-sys/src_cpp` headers:
#include "ResultHandling.hpp"
// GenMC headers:
#include "ExecutionGraph/EventLabel.hpp"
#include "Static/ModuleID.hpp"
#include "Support/MemOrdering.hpp"
#include "Support/RMWOps.hpp"
#include "Verification/Config.hpp"
#include "Verification/GenMCDriver.hpp"
// C++ headers:
#include <cstdint>
#include <format>
#include <iomanip>
#include <memory>
/**** Types available to both Rust and C++ ****/
struct GenmcParams;
enum class LogLevel : std::uint8_t;
struct GenmcScalar;
struct SchedulingResult;
struct LoadResult;
struct StoreResult;
// GenMC uses `int` for its thread IDs.
using ThreadId = int;
/// Set the log level for GenMC.
///
/// # Safety
///
/// This function is not thread safe, since it writes to the global, mutable, non-atomic `logLevel`
/// variable. Any GenMC function may read from `logLevel` unsynchronized.
/// The safest way to use this function is to set the log level exactly once before first calling
/// `create_handle`.
/// Never calling this function is safe, GenMC will fall back to its default log level.
/* unsafe */ void set_log_level_raw(LogLevel log_level);
/// Shim around `GenMCDriver` that exposes an interface usable from Miri via the CXX.rs bridge.
/// Private inheritance hides the base-class API; only the members declared here are callable.
struct MiriGenmcShim : private GenMCDriver {

  public:
    MiriGenmcShim(std::shared_ptr<const Config> conf, Mode mode /* = VerificationMode{} */)
        : GenMCDriver(std::move(conf), nullptr, mode) {}

    /// Create a new `MiriGenmcShim`, which wraps a `GenMCDriver`.
    ///
    /// # Safety
    ///
    /// This function is marked as unsafe since the `logLevel` global variable is non-atomic.
    /// This function should not be called in an unsynchronized way with `set_log_level_raw`,
    /// since this function and any methods on the returned `MiriGenmcShim` may read the
    /// `logLevel`, causing a data race. The safest way to use these functions is to call
    /// `set_log_level_raw` once, and only then start creating handles. There should not be any
    /// other (safe) way to create a `MiriGenmcShim`.
    /* unsafe */ static auto create_handle(const GenmcParams& params)
        -> std::unique_ptr<MiriGenmcShim>;

    virtual ~MiriGenmcShim() {}

    /**** Execution start/end handling ****/

    // This function must be called at the start of any execution, before any events are
    // reported to GenMC.
    void handle_execution_start();
    // This function must be called at the end of any execution, even if an error was found
    // during the execution.
    // Returns `null`, or a string containing an error message if an error occurred.
    std::unique_ptr<std::string> handle_execution_end();

    /***** Functions for handling events encountered during program execution. *****/

    /**** Memory access handling ****/

    // NOTE(review): `old_val` is currently unused by the implementations in
    // `EventHandling.cpp`; presumably it is reserved for mixed atomic/non-atomic
    // support — confirm once that lands.
    [[nodiscard]] LoadResult handle_load(
        ThreadId thread_id,
        uint64_t address,
        uint64_t size,
        MemOrdering ord,
        GenmcScalar old_val
    );
    [[nodiscard]] StoreResult handle_store(
        ThreadId thread_id,
        uint64_t address,
        uint64_t size,
        GenmcScalar value,
        GenmcScalar old_val,
        MemOrdering ord
    );

    /**** Memory (de)allocation ****/
    auto handle_malloc(ThreadId thread_id, uint64_t size, uint64_t alignment) -> uint64_t;
    void handle_free(ThreadId thread_id, uint64_t address);

    /**** Thread management ****/
    void handle_thread_create(ThreadId thread_id, ThreadId parent_id);
    void handle_thread_join(ThreadId thread_id, ThreadId child_id);
    void handle_thread_finish(ThreadId thread_id, uint64_t ret_val);
    void handle_thread_kill(ThreadId thread_id);

    /***** Exploration related functionality *****/

    /** Ask the GenMC scheduler for a new thread to schedule and return whether the execution is
     * finished, blocked, or can continue.
     * Updates the next instruction kind for the given thread id. */
    auto schedule_next(const int curr_thread_id, const ActionKind curr_thread_next_instr_kind)
        -> SchedulingResult;

    /**
     * Check whether there are more executions to explore.
     * If there are more executions, this method prepares for the next execution and returns
     * `false`; it returns `true` once there are no more executions to explore. */
    auto is_exploration_done() -> bool {
        return GenMCDriver::done();
    }

    /**** Result querying functionality. ****/

    // NOTE: We don't want to share the `VerificationResult` type with the Rust side, since it
    // is very large, uses features that CXX.rs doesn't support and may change as GenMC changes.
    // Instead, we only use the result on the C++ side, and only expose these getter function to
    // the Rust side.
    // Note that CXX.rs doesn't support returning a C++ string to Rust by value,
    // it must be behind an indirection like a `unique_ptr` (tested with CXX 1.0.170).

    /// Get the number of blocked executions encountered by GenMC (cast into a fixed-width
    /// integer)
    auto get_blocked_execution_count() const -> uint64_t {
        return static_cast<uint64_t>(getResult().exploredBlocked);
    }
    /// Get the number of executions explored by GenMC (cast into a fixed-width integer)
    auto get_explored_execution_count() const -> uint64_t {
        return static_cast<uint64_t>(getResult().explored);
    }
    /// Get all messages that GenMC produced (errors, warnings), combined into one string.
    auto get_result_message() const -> std::unique_ptr<std::string> {
        return std::make_unique<std::string>(getResult().message);
    }
    /// If an error occurred, return a string describing the error, otherwise, return `nullptr`.
    auto get_error_string() const -> std::unique_ptr<std::string> {
        const auto& result = GenMCDriver::getResult();
        if (result.status.has_value())
            return format_error(result.status.value());
        return nullptr;
    }

  private:
    /** Increment the event index in the given thread by 1 and return the new event. */
    // NOTE: a negative `tid` converts to a huge `size_t` in the comparison below, so it is
    // also caught by the bounds check.
    [[nodiscard]] inline auto inc_pos(ThreadId tid) -> Event {
        ERROR_ON(tid >= threads_action_.size(), "ThreadId out of bounds");
        return ++threads_action_[tid].event;
    }
    /** Decrement the event index in the given thread by 1 and return the new event. */
    inline auto dec_pos(ThreadId tid) -> Event {
        ERROR_ON(tid >= threads_action_.size(), "ThreadId out of bounds");
        return --threads_action_[tid].event;
    }

    /**
     * Helper function for loads that need to reset the event counter when no value is returned.
     * Same syntax as `GenMCDriver::handleLoad`, but this takes a thread id instead of an Event.
     * Automatically calls `inc_pos` and `dec_pos` where needed for the given thread.
     */
    template <EventLabel::EventLabelKind k, typename... Ts>
    auto handle_load_reset_if_none(ThreadId tid, Ts&&... params) -> HandleResult<SVal> {
        const auto pos = inc_pos(tid);
        const auto ret = GenMCDriver::handleLoad<k>(pos, std::forward<Ts>(params)...);
        // If we didn't get a value, we have to reset the index of the current thread.
        if (!std::holds_alternative<SVal>(ret)) {
            dec_pos(tid);
        }
        return ret;
    }

    /**
     * GenMC uses the term `Action` to refer to a struct of:
     * - `ActionKind`, storing whether the next instruction in a thread may be a load
     * - `Event`, storing the most recent event index added for a thread
     *
     * Here we store the "action" for each thread. In particular we use this to assign event
     * indices, since GenMC expects us to do that.
     */
    std::vector<Action> threads_action_;
};
/// Get the bit mask that GenMC expects for global ("static") memory allocations.
/// FIXME(genmc): currently we use `get_global_alloc_static_mask()` to ensure the
/// `SAddr::staticMask` constant is consistent between Miri and GenMC, but if
/// https://github.com/dtolnay/cxx/issues/1051 is fixed we could share the constant
/// directly.
constexpr uint64_t get_global_alloc_static_mask() {
    return SAddr::staticMask;
}
// CXX.rs generated headers:
// NOTE: this must be included *after* `MiriGenmcShim` and all the other types we use are defined,
// otherwise there will be compilation errors due to missing definitions.
#include "genmc-sys/src/lib.rs.h"
/**** Result handling ****/
// NOTE: these must come after the cxx_bridge generated code, since that contains the actual
// definitions of these types.
// Helper constructors for the CXX.rs shared struct `GenmcScalar`.
// NOTE: fields are initialized positionally, so the order must match the struct
// definition in the Rust bridge (`genmc-sys/src/lib.rs`) exactly.
namespace GenmcScalarExt {
/// A `GenmcScalar` representing uninitialized memory (the stored value is a dummy `0`).
inline GenmcScalar uninit() {
    return GenmcScalar {
        /* value: */ 0,
        /* is_init: */ false,
    };
}
/// Wrap an initialized GenMC `SVal` into a `GenmcScalar`.
inline GenmcScalar from_sval(SVal sval) {
    return GenmcScalar {
        /* value: */ sval.get(),
        /* is_init: */ true,
    };
}
/// Unwrap a `GenmcScalar` into a GenMC `SVal`; raises an error via `ERROR_ON` if the
/// scalar is uninitialized, since `SVal` has no "uninit" representation.
inline SVal to_sval(GenmcScalar scalar) {
    ERROR_ON(!scalar.is_init, "Cannot convert an uninitialized `GenmcScalar` into an `SVal`\n");
    return SVal(scalar.value);
}
} // namespace GenmcScalarExt
// Helper constructors for the CXX.rs shared struct `LoadResult`.
// NOTE: fields are initialized positionally, so the order must match the struct
// definition in the Rust bridge (`genmc-sys/src/lib.rs`) exactly.
namespace LoadResultExt {
/// A load result carrying neither an error nor a value.
inline LoadResult no_value() {
    return LoadResult {
        /* error: */ std::unique_ptr<std::string>(nullptr),
        /* has_value: */ false,
        /* read_value: */ GenmcScalarExt::uninit(),
    };
}
/// A successful load that read `read_value`.
inline LoadResult from_value(SVal read_value) {
    return LoadResult { /* error: */ std::unique_ptr<std::string>(nullptr),
                        /* has_value: */ true,
                        /* read_value: */ GenmcScalarExt::from_sval(read_value) };
}
/// A failed load carrying the error message `error`.
inline LoadResult from_error(std::unique_ptr<std::string> error) {
    return LoadResult { /* error: */ std::move(error),
                        /* has_value: */ false,
                        /* read_value: */ GenmcScalarExt::uninit() };
}
} // namespace LoadResultExt
// Helper constructors for the CXX.rs shared struct `StoreResult`.
// NOTE: fields are initialized positionally, so the order must match the struct
// definition in the Rust bridge (`genmc-sys/src/lib.rs`) exactly.
namespace StoreResultExt {
/// A successful store; `is_coherence_order_maximal_write` reports whether the store became
/// the coherence-order-maximal write for its location.
inline StoreResult ok(bool is_coherence_order_maximal_write) {
    return StoreResult { /* error: */ std::unique_ptr<std::string>(nullptr),
                         is_coherence_order_maximal_write };
}
/// A failed store carrying the error message `error`.
inline StoreResult from_error(std::unique_ptr<std::string> error) {
    return StoreResult { /* error: */ std::move(error),
                         /* is_coherence_order_maximal_write: */ false };
}
} // namespace StoreResultExt
#endif /* GENMC_MIRI_INTERFACE_HPP */

View file

@ -0,0 +1,21 @@
#ifndef GENMC_RESULT_HANDLING_HPP
#define GENMC_RESULT_HANDLING_HPP
// CXX.rs generated headers:
#include "rust/cxx.h"
// GenMC headers:
#include "Verification/VerificationError.hpp"
#include <string>
/** Information about an error, formatted as a string to avoid having to share an error enum and
* printing functionality with the Rust side. */
/** Format a `VerificationError` as a heap-allocated string, to avoid having to share an error
 * enum and printing functionality with the Rust side.
 * The string is returned behind a `unique_ptr` because CXX.rs cannot return a C++ string to
 * Rust by value.
 * NOTE: `inline` (rather than `static`) because this function is defined in a header that is
 * included from multiple translation units; `static` would emit a separate copy per TU and
 * trigger unused-function warnings in TUs that never call it. */
inline auto format_error(VerificationError err) -> std::unique_ptr<std::string> {
    auto buf = std::string();
    auto s = llvm::raw_string_ostream(buf);
    s << err;
    return std::make_unique<std::string>(s.str());
}
#endif /* GENMC_RESULT_HANDLING_HPP */

View file

@ -0,0 +1,120 @@
/** This file contains functionality related to handling events encountered
* during an execution, such as loads, stores or memory (de)allocation. */
#include "MiriInterface.hpp"
// CXX.rs generated headers:
#include "genmc-sys/src/lib.rs.h"
// GenMC headers:
#include "ADT/value_ptr.hpp"
#include "ExecutionGraph/EventLabel.hpp"
#include "ExecutionGraph/LoadAnnotation.hpp"
#include "Runtime/InterpreterEnumAPI.hpp"
#include "Static/ModuleID.hpp"
#include "Support/ASize.hpp"
#include "Support/Error.hpp"
#include "Support/Logger.hpp"
#include "Support/MemAccess.hpp"
#include "Support/RMWOps.hpp"
#include "Support/SAddr.hpp"
#include "Support/SVal.hpp"
#include "Support/ThreadInfo.hpp"
#include "Support/Verbosity.hpp"
#include "Verification/GenMCDriver.hpp"
#include "Verification/MemoryModel.hpp"
// C++ headers:
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
/**** Memory access handling ****/

// Report a load of `size` bytes at `address` with ordering `ord` to GenMC, and return either
// the value GenMC chose for the load to read, or an error.
// NOTE(review): `old_val` is currently unused — presumably reserved for mixed
// atomic/non-atomic access support; confirm once that lands.
[[nodiscard]] auto MiriGenmcShim::handle_load(
    ThreadId thread_id,
    uint64_t address,
    uint64_t size,
    MemOrdering ord,
    GenmcScalar old_val
) -> LoadResult {
    // `type` is only used for printing.
    const auto type = AType::Unsigned;
    // This helper increments the thread's event index, and rolls it back if the load
    // produces no value.
    const auto ret = handle_load_reset_if_none<EventLabel::EventLabelKind::Read>(
        thread_id,
        ord,
        SAddr(address),
        ASize(size),
        type
    );
    if (const auto* err = std::get_if<VerificationError>(&ret))
        return LoadResultExt::from_error(format_error(*err));
    const auto* ret_val = std::get_if<SVal>(&ret);
    if (ret_val == nullptr)
        ERROR("Unimplemented: load returned unexpected result.");
    return LoadResultExt::from_value(*ret_val);
}
// Report a store of `value` (`size` bytes at `address`, ordering `ord`) to GenMC.
// On success, also reports whether this store became the coherence-order-maximal write for
// its location.
// NOTE(review): `old_val` is currently unused — presumably reserved for mixed
// atomic/non-atomic access support; confirm once that lands.
[[nodiscard]] auto MiriGenmcShim::handle_store(
    ThreadId thread_id,
    uint64_t address,
    uint64_t size,
    GenmcScalar value,
    GenmcScalar old_val,
    MemOrdering ord
) -> StoreResult {
    // Assign the next event index in this thread to the store.
    const auto pos = inc_pos(thread_id);
    const auto ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::Write>(
        pos,
        ord,
        SAddr(address),
        ASize(size),
        /* type */ AType::Unsigned, // `type` is only used for printing.
        GenmcScalarExt::to_sval(value),
        EventDeps()
    );
    if (const auto* err = std::get_if<VerificationError>(&ret))
        return StoreResultExt::from_error(format_error(*err));
    // A successful store currently yields no payload (`monostate`).
    if (!std::holds_alternative<std::monostate>(ret))
        ERROR("store returned unexpected result");
    // FIXME(genmc,mixed-accesses): Use the value that GenMC returns from handleStore (once
    // available).
    const auto& g = getExec().getGraph();
    return StoreResultExt::ok(
        /* is_coherence_order_maximal_write */ g.co_max(SAddr(address))->getPos() == pos
    );
}
/**** Memory (de)allocation ****/

/// Report a heap allocation of `size` bytes with the given `alignment` to GenMC and return
/// the address GenMC assigned to it.
auto MiriGenmcShim::handle_malloc(ThreadId thread_id, uint64_t size, uint64_t alignment)
    -> uint64_t {
    const auto pos = inc_pos(thread_id);
    // The storage arguments below are only used for printing and for features that
    // Miri-GenMC doesn't support (yet): heap storage duration, volatile storage (as
    // opposed to "persistent", i.e., non-volatile memory that persists over reboots),
    // and the user address space.
    const SVal allocated_address = GenMCDriver::handleMalloc(
        pos,
        size,
        alignment,
        StorageDuration::SD_Heap,
        StorageType::ST_Volatile,
        AddressSpace::AS_User,
        EventDeps()
    );
    return allocated_address.get();
}
/// Report the deallocation of the allocation at `address` to GenMC.
// FIXME(genmc): add error handling once GenMC returns errors from `handleFree`
void MiriGenmcShim::handle_free(ThreadId thread_id, uint64_t address) {
    const auto event = inc_pos(thread_id);
    GenMCDriver::handleFree(event, SAddr(address), EventDeps());
}

View file

@ -0,0 +1,42 @@
/** This file contains functionality related to exploration, such as scheduling. */
#include "MiriInterface.hpp"
// CXX.rs generated headers:
#include "genmc-sys/src/lib.rs.h"
// GenMC headers:
#include "Support/Error.hpp"
#include "Support/Verbosity.hpp"
// C++ headers:
#include <cstdint>
/// Ask GenMC which thread to run next.
/// Returns `Ok` plus the chosen thread id, `Blocked` if the execution cannot proceed, or
/// `Finished` if the execution is over.
auto MiriGenmcShim::schedule_next(
    const int curr_thread_id,
    const ActionKind curr_thread_next_instr_kind
) -> SchedulingResult {
    // Only the currently executing thread can have changed its next-instruction kind since
    // the previous scheduling decision, so just that entry needs updating.
    threads_action_[curr_thread_id].kind = curr_thread_next_instr_kind;
    const auto next_thread = GenMCDriver::scheduleNext(threads_action_);
    if (next_thread.has_value()) {
        return SchedulingResult { ExecutionState::Ok, static_cast<int32_t>(*next_thread) };
    }
    const auto state =
        GenMCDriver::isExecutionBlocked() ? ExecutionState::Blocked : ExecutionState::Finished;
    return SchedulingResult { state, 0 };
}
/**** Execution start/end handling ****/

// Reset the per-thread action state and tell GenMC a new execution begins.
// Only one thread exists at the start of an execution; its next instruction may be a load,
// and its event index starts at the initial event.
void MiriGenmcShim::handle_execution_start() {
    threads_action_.clear();
    threads_action_.push_back(Action(ActionKind::Load, Event::getInit()));
    GenMCDriver::handleExecutionStart();
}
/// Tell GenMC that the current execution has ended.
/// Currently always returns `nullptr` (no error message).
auto MiriGenmcShim::handle_execution_end() -> std::unique_ptr<std::string> {
    // FIXME(genmc): add error handling once GenMC returns an error here.
    GenMCDriver::handleExecutionEnd();
    return nullptr;
}

View file

@ -0,0 +1,185 @@
/** This file contains functionality related to creation of the GenMCDriver,
* including translating settings set by Miri. */
#include "MiriInterface.hpp"
// CXX.rs generated headers:
#include "genmc-sys/src/lib.rs.h"
// GenMC headers:
#include "Support/Error.hpp"
#include "Support/Verbosity.hpp"
#include "Verification/InterpreterCallbacks.hpp"
// C++ headers:
#include <cstdint>
/**
 * Translate the Miri-GenMC `LogLevel` to the GenMC `VerbosityLevel`.
 * Downgrade any debug options to `Tip` if `ENABLE_GENMC_DEBUG` is not enabled.
 */
static auto to_genmc_verbosity_level(const LogLevel log_level) -> VerbosityLevel {
    switch (log_level) {
        case LogLevel::Quiet:
            return VerbosityLevel::Quiet;
        case LogLevel::Error:
            return VerbosityLevel::Error;
        case LogLevel::Warning:
            return VerbosityLevel::Warning;
        case LogLevel::Tip:
            return VerbosityLevel::Tip;
#ifdef ENABLE_GENMC_DEBUG
        case LogLevel::Debug1Revisits:
            return VerbosityLevel::Debug1;
        case LogLevel::Debug2MemoryAccesses:
            return VerbosityLevel::Debug2;
        case LogLevel::Debug3ReadsFrom:
            return VerbosityLevel::Debug3;
#else
        // Downgrade to `Tip` if the debug levels are not available.
        case LogLevel::Debug1Revisits:
        case LogLevel::Debug2MemoryAccesses:
        case LogLevel::Debug3ReadsFrom:
            return VerbosityLevel::Tip;
#endif
        default:
            // `LogLevel` is shared with Rust through CXX.rs, so out-of-range values are
            // representable; warn once and fall back rather than aborting.
            WARN_ONCE(
                "unknown-log-level",
                "Unknown `LogLevel`, defaulting to `VerbosityLevel::Tip`."
            );
            return VerbosityLevel::Tip;
    }
}
/// Set GenMC's global log level. See the safety comment on the declaration in
/// `MiriInterface.hpp`: this must not race with any `MiriGenmcShim` method.
/* unsafe */ void set_log_level_raw(LogLevel log_level) {
    // The `logLevel` is a static, non-atomic variable.
    // It should never be changed if `MiriGenmcShim` still exists, since any of its methods may read
    // the `logLevel`, otherwise it may cause data races.
    logLevel = to_genmc_verbosity_level(log_level);
}
/* unsafe */ auto MiriGenmcShim::create_handle(const GenmcParams& params)
-> std::unique_ptr<MiriGenmcShim> {
auto conf = std::make_shared<Config>();
// Set whether GenMC should print execution graphs after every explored/blocked execution.
// FIXME(genmc): pass these settings from Miri.
conf->printExecGraphs = false;
conf->printBlockedExecs = false;
// `1024` is the default value that GenMC uses.
// If any thread has at least this many events, a warning/tip will be printed.
//
// Miri produces a lot more events than GenMC, so the graph size warning triggers on almost
// all programs. The current value is large enough so the warning is not be triggered by any
// reasonable programs.
// FIXME(genmc): The emitted warning mentions features not supported by Miri ('--unroll'
// parameter).
// FIXME(genmc): A more appropriate limit should be chosen once the warning is useful for
// Miri.
conf->warnOnGraphSize = 1024 * 1024;
// We only support the RC11 memory model for Rust.
conf->model = ModelType::RC11;
// This prints the seed that GenMC picks for randomized scheduling during estimation mode.
conf->printRandomScheduleSeed = params.print_random_schedule_seed;
// FIXME(genmc): supporting IPR requires annotations for `assume` and `spinloops`.
conf->ipr = false;
// FIXME(genmc): supporting BAM requires `Barrier` support + detecting whether return value
// of barriers are used.
conf->disableBAM = true;
// Instruction caching could help speed up verification by filling the graph from cache, if
// the list of values read by all load events in a thread have been seen before. Combined
// with not replaying completed threads, this can also reduce the amount of Mir
// interpretation required by Miri. With the current setup, this would be incorrect, since
// Miri doesn't give GenMC the actual values read by non-atomic reads.
conf->instructionCaching = false;
// Many of Miri's checks work under the assumption that threads are only executed in an
// order that could actually happen during a normal execution. Formally, this means that
// replaying an execution needs to respect the po-rf-relation of the executiongraph (po ==
// program-order, rf == reads-from). This means, any event in the graph, when replayed, must
// happen after any events that happen before it in the same graph according to the program
// code, and all (non-atomic) reads must happen after the write event they read from.
//
// Not replaying completed threads means any read event from that thread never happens in
// Miri's memory, so this would only work if there are never any non-atomic reads from any
// value written by the skipped thread.
conf->replayCompletedThreads = true;
// FIXME(genmc): implement symmetry reduction.
ERROR_ON(
params.do_symmetry_reduction,
"Symmetry reduction is currently unsupported in GenMC mode."
);
conf->symmetryReduction = params.do_symmetry_reduction;
// FIXME(genmc): expose this setting to Miri (useful for testing Miri-GenMC).
conf->schedulePolicy = SchedulePolicy::WF;
// Set the mode used for this driver, either estimation or verification.
// FIXME(genmc): implement estimation mode.
const auto mode = GenMCDriver::Mode(GenMCDriver::VerificationMode {});
// Running Miri-GenMC without race detection is not supported.
// Disabling this option also changes the behavior of the replay scheduler to only schedule
// at atomic operations, which is required with Miri. This happens because Miri can generate
// multiple GenMC events for a single MIR terminator. Without this option, the scheduler
// might incorrectly schedule an atomic MIR terminator because the first event it creates is
// a non-atomic (e.g., `StorageLive`).
conf->disableRaceDetection = false;
// Miri can already check for unfreed memory. Also, GenMC cannot distinguish between memory
// that is allowed to leak and memory that is not.
conf->warnUnfreedMemory = false;
// FIXME(genmc,error handling): This function currently exits on error, but will return an
// error value in the future. The return value should be checked once this change is made.
checkConfig(*conf);
// Create the actual driver and Miri-GenMC communication shim.
auto driver = std::make_unique<MiriGenmcShim>(std::move(conf), mode);
// FIXME(genmc,HACK): Until a proper solution is implemented in GenMC, these callbacks will
// allow Miri to return information about global allocations and override uninitialized memory
// checks for non-atomic loads (Miri handles those without GenMC, so the error would be wrong).
auto interpreter_callbacks = InterpreterCallbacks {
// Miri already ensures that memory accesses are valid, so this check doesn't matter.
// We check that the address is static, but skip checking if it is part of an actual
// allocation.
/* isStaticallyAllocated: */ [](SAddr addr) { return addr.isStatic(); },
// FIXME(genmc,error reporting): Once a proper API for passing such information is
// implemented in GenMC, Miri should use it to improve the produced error messages.
/* getStaticName: */ [](SAddr addr) { return "[UNKNOWN STATIC]"; },
// This function is called to get the initial value stored at the given address.
//
// From a Miri perspective, this API doesn't work very well: most memory starts out
// "uninitialized";
// only statics have an initial value. And their initial value is just a sequence of bytes,
// but GenMC
// expect this to be already split into separate atomic variables. So we return a dummy
// value.
// This value should never be visible to the interpreted program.
// GenMC does not understand uninitialized memory the same way Miri does, which may cause
// this function to be called. The returned value can be visible to Miri or the user:
// - Printing the execution graph may contain this value in place of uninitialized values.
// FIXME(genmc): NOTE: printing the execution graph is not yet implemented.
// - Non-atomic loads may return this value, but Miri ignores values of non-atomic loads.
// - Atomic loads will *not* see this value once mixed atomic-non-atomic support is added.
// Currently, atomic loads can see this value, unless initialized by an *atomic* store.
// FIXME(genmc): update this comment once mixed atomic-non-atomic support is added.
//
// FIXME(genmc): implement proper support for uninitialized memory in GenMC. Ideally, the
// initial value getter would return an `optional<SVal>`, since the memory location may be
// uninitialized.
/* initValGetter: */ [](const AAccess& a) { return SVal(0xDEAD); },
// Miri serves non-atomic loads from its own memory and these GenMC checks are wrong in
// that case. This should no longer be required with proper mixed-size access support.
/* skipUninitLoadChecks: */ [](MemOrdering ord) { return ord == MemOrdering::NotAtomic; },
};
driver->setInterpCallbacks(std::move(interpreter_callbacks));
return driver;
}

View file

@ -0,0 +1,54 @@
/** This file contains functionality related to thread management (creation, finishing, join, etc.) */
#include "MiriInterface.hpp"
// CXX.rs generated headers:
#include "genmc-sys/src/lib.rs.h"
// GenMC headers:
#include "Support/Error.hpp"
#include "Support/Verbosity.hpp"
// C++ headers:
#include <cstdint>
// Inform GenMC that thread `parent_id` spawned a new thread `thread_id`.
void MiriGenmcShim::handle_thread_create(ThreadId thread_id, ThreadId parent_id) {
    // NOTE: The threadCreate event happens in the parent:
    const auto pos = inc_pos(parent_id);
    // FIXME(genmc): for supporting symmetry reduction, these will need to be properly set:
    const unsigned fun_id = 0;
    const SVal arg = SVal(0);
    const ThreadInfo child_info = ThreadInfo { thread_id, parent_id, fun_id, arg };
    // NOTE: Default memory ordering (`Release`) used here.
    const auto child_tid = GenMCDriver::handleThreadCreate(pos, child_info, EventDeps());
    // Sanity check the thread id, which is the index in the `threads_action_` array.
    // NOTE(review): this relies on GenMC handing out thread ids sequentially, matching the
    // ids Miri chose; the `BUG_ON` verifies that (0 is the main thread, so ids must be > 0
    // and equal to the next free slot in `threads_action_`).
    BUG_ON(child_tid != thread_id || child_tid <= 0 || child_tid != threads_action_.size());
    // Track the new thread's upcoming action: its first event (index 0), initially
    // assumed to be a `Load`.
    threads_action_.push_back(Action(ActionKind::Load, Event(child_tid, 0)));
}
// Inform GenMC that thread `thread_id` attempts to join thread `child_id`.
void MiriGenmcShim::handle_thread_join(ThreadId thread_id, ThreadId child_id) {
    // The thread join event happens in the parent, so reserve the next event
    // position in the joining thread.
    const auto event_pos = inc_pos(thread_id);
    // NOTE: Default memory ordering (`Acquire`) used here.
    const auto join_result = GenMCDriver::handleThreadJoin(event_pos, child_id, EventDeps());
    // A successful join yields the child's return value (an `SVal`); Miri doesn't need
    // that value, so it is simply dropped. On failure, roll back the event index that
    // was optimistically incremented above.
    const bool join_succeeded = std::holds_alternative<SVal>(join_result);
    if (!join_succeeded)
        dec_pos(thread_id);
}
// Inform GenMC that thread `thread_id` finished with return value `ret_val`.
void MiriGenmcShim::handle_thread_finish(ThreadId thread_id, uint64_t ret_val) {
    // The finish event is placed at the thread's next event position.
    // NOTE: Default memory ordering (`Release`) used here.
    GenMCDriver::handleThreadFinish(inc_pos(thread_id), SVal(ret_val));
}
// Inform GenMC that thread `thread_id` was killed (without finishing normally).
void MiriGenmcShim::handle_thread_kill(ThreadId thread_id) {
    // The kill event is placed at the thread's next event position.
    GenMCDriver::handleThreadKill(inc_pos(thread_id));
}

View file

@ -1,17 +1,90 @@
use std::str::FromStr;
use std::sync::OnceLock;
pub use cxx::UniquePtr;
pub use self::ffi::*;
/// Defined in "genmc/src/Support/SAddr.hpp".
/// The first bit of all global addresses must be set to `1`.
/// This means the mask, interpreted as an address, is the lower bound of where the global address space starts.
/// (Since the mask is `1 << 63`, all global addresses are `>= 2^63`.)
///
/// FIXME(genmc): rework this if non-64bit support is added to GenMC (the current allocation scheme only allows for 64bit addresses).
/// FIXME(genmc): currently we use `get_global_alloc_static_mask()` to ensure the constant is consistent between Miri and GenMC,
/// but if https://github.com/dtolnay/cxx/issues/1051 is fixed we could share the constant directly.
pub const GENMC_GLOBAL_ADDRESSES_MASK: u64 = 1 << 63;
/// GenMC thread ids are C++ type `int`, which is equivalent to Rust's `i32` on most platforms.
/// The main thread always has thread id 0.
pub const GENMC_MAIN_THREAD_ID: i32 = 0;
/// Changing GenMC's log level is not thread safe, so we limit it to only be set once to prevent any data races.
/// This value will be initialized when the first `MiriGenmcShim` is created.
/// (See `create_genmc_driver_handle` for the only writer.)
static GENMC_LOG_LEVEL: OnceLock<LogLevel> = OnceLock::new();
/// Create a new handle to the GenMC model checker.
///
/// The first call to this function determines the log level of GenMC; any future call
/// with a different log level will panic (see the `assert_eq!` below).
pub fn create_genmc_driver_handle(
    params: &GenmcParams,
    genmc_log_level: LogLevel,
) -> UniquePtr<MiriGenmcShim> {
    // SAFETY: Only setting the GenMC log level once is guaranteed by the `OnceLock`.
    // No other place calls `set_log_level_raw`, so the `logLevel` value in GenMC will not change once we initialize it.
    // All functions that use GenMC's `logLevel` can only be accessed in safe Rust through a `MiriGenmcShim`.
    // There is no way to get `MiriGenmcShim` other than through `create_handle`, and we only call it *after* setting the log level, preventing any possible data races.
    assert_eq!(
        &genmc_log_level,
        GENMC_LOG_LEVEL.get_or_init(|| {
            unsafe { set_log_level_raw(genmc_log_level) };
            genmc_log_level
        }),
        "Attempt to change the GenMC log level after it was already set"
    );
    unsafe { MiriGenmcShim::create_handle(params) }
}
impl GenmcScalar {
    /// A scalar representing uninitialized memory (`is_init == false`; the value `0` is a dummy).
    pub const UNINIT: Self = Self { value: 0, is_init: false };
    /// Create an initialized scalar holding `value`.
    pub const fn from_u64(value: u64) -> Self {
        Self { value, is_init: true }
    }
}
impl Default for GenmcParams {
fn default() -> Self {
Self {
print_random_schedule_seed: false,
do_symmetry_reduction: false,
// FIXME(GenMC): Add defaults for remaining parameters
}
Self { print_random_schedule_seed: false, do_symmetry_reduction: false }
}
}
impl Default for LogLevel {
    /// The log level used when the user does not explicitly choose one.
    fn default() -> Self {
        // FIXME(genmc): set `Tip` by default once the GenMC tips are relevant to Miri.
        Self::Warning
    }
}
impl FromStr for LogLevel {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"quiet" => LogLevel::Quiet,
"error" => LogLevel::Error,
"warning" => LogLevel::Warning,
"tip" => LogLevel::Tip,
"debug1" => LogLevel::Debug1Revisits,
"debug2" => LogLevel::Debug2MemoryAccesses,
"debug3" => LogLevel::Debug3ReadsFrom,
_ => return Err("invalid log level"),
})
}
}
#[cxx::bridge]
mod ffi {
/**** Types shared between Miri/Rust and Miri/C++ through cxx_bridge: ****/
/// Parameters that will be given to GenMC for setting up the model checker.
/// (The fields of this struct are visible to both Rust and C++)
#[derive(Clone, Debug)]
@ -20,11 +93,221 @@ mod ffi {
pub do_symmetry_reduction: bool,
// FIXME(GenMC): Add remaining parameters.
}
/// This is mostly equivalent to GenMC `VerbosityLevel`, but the debug log levels are always present (not conditionally compiled based on `ENABLE_GENMC_DEBUG`).
/// We add this intermediate type to prevent changes to the GenMC log-level from breaking the Miri
/// build, and to have a stable type for the C++-Rust interface, independent of `ENABLE_GENMC_DEBUG`.
#[derive(Debug)]
enum LogLevel {
/// Disable *all* logging (including error messages on a crash).
Quiet,
/// Log errors.
Error,
/// Log errors and warnings.
Warning,
/// Log errors, warnings and tips.
Tip,
/// Debug print considered revisits.
/// Downgraded to `Tip` if `GENMC_DEBUG` is not enabled.
Debug1Revisits,
/// Print the execution graph after every memory access.
/// Also includes the previous debug log level.
/// Downgraded to `Tip` if `GENMC_DEBUG` is not enabled.
Debug2MemoryAccesses,
/// Print reads-from values considered by GenMC.
/// Also includes the previous debug log level.
/// Downgraded to `Tip` if `GENMC_DEBUG` is not enabled.
Debug3ReadsFrom,
}
/// This type corresponds to `Option<SVal>` (or `std::optional<SVal>`), where `SVal` is the type that GenMC uses for storing values.
/// CXX doesn't support `std::optional` currently, so we need to use an extra `bool` to define whether this value is initialized or not.
#[derive(Debug, Clone, Copy)]
struct GenmcScalar {
value: u64,
is_init: bool,
}
/// State of the current execution, as reported by the GenMC scheduler
/// (see `schedule_next`).
#[must_use]
#[derive(Debug, Clone, Copy)]
enum ExecutionState {
    /// The execution can continue normally.
    Ok,
    /// The execution is currently blocked; GenMC decides if/when it can continue.
    Blocked,
    /// The execution has finished.
    Finished,
}
/// Result of asking the GenMC scheduler for the next thread to run
/// (returned by `schedule_next`).
#[must_use]
#[derive(Debug)]
struct SchedulingResult {
    /// Whether the execution can continue, is blocked, or has finished.
    exec_state: ExecutionState,
    /// The thread that should be scheduled next.
    /// NOTE(review): presumably only meaningful when `exec_state` is `Ok` — confirm on the C++ side.
    next_thread: i32,
}
#[must_use]
#[derive(Debug)]
struct LoadResult {
/// If not null, contains the error encountered during the handling of the load.
error: UniquePtr<CxxString>,
/// Indicates whether a value was read or not.
has_value: bool,
/// The value that was read. Should not be used if `has_value` is `false`.
read_value: GenmcScalar,
}
#[must_use]
#[derive(Debug)]
struct StoreResult {
/// If not null, contains the error encountered during the handling of the store.
error: UniquePtr<CxxString>,
/// `true` if the write should also be reflected in Miri's memory representation.
is_coherence_order_maximal_write: bool,
}
/**** Types shared between Miri/Rust and GenMC/C++ through cxx_bridge: ****/
#[derive(Debug)]
/// Corresponds to GenMC's type with the same name.
/// Should only be modified if changed by GenMC.
enum ActionKind {
/// Any Mir terminator that's atomic and has load semantics.
Load,
/// Anything that's not a `Load`.
NonLoad,
}
#[derive(Debug)]
/// Corresponds to GenMC's type with the same name.
/// Should only be modified if changed by GenMC.
enum MemOrdering {
NotAtomic = 0,
Relaxed = 1,
// We skip 2 in case we support consume.
Acquire = 3,
Release = 4,
AcquireRelease = 5,
SequentiallyConsistent = 6,
}
// # Safety
//
// This block is unsafe to allow defining safe methods inside.
//
// `get_global_alloc_static_mask` is safe since it just returns a constant.
// All methods on `MiriGenmcShim` are safe by the correct usage of the two unsafe functions
// `set_log_level_raw` and `MiriGenmcShim::create_handle`.
// See the doc comment on those two functions for their safety requirements.
//
// NOTE: the stale `type MiriGenMCShim;` and `fn createGenmcHandle(...)` declarations
// (leftovers of the old, now-deleted C++ API) were removed; the corresponding C++
// definitions no longer exist, so keeping them would break the cxx bridge.
unsafe extern "C++" {
    include!("MiriInterface.hpp");

    /**** Types shared between Miri/Rust and Miri/C++: ****/
    type MiriGenmcShim;

    /**** Types shared between Miri/Rust and GenMC/C++: ****/
    type ActionKind;
    type MemOrdering;

    /// Set the log level for GenMC.
    ///
    /// # Safety
    ///
    /// This function is not thread safe, since it writes to the global, mutable, non-atomic `logLevel` variable.
    /// Any GenMC function may read from `logLevel` unsynchronized.
    /// The safest way to use this function is to set the log level exactly once before first calling `create_handle`.
    /// Never calling this function is safe, GenMC will fall back to its default log level.
    unsafe fn set_log_level_raw(log_level: LogLevel);

    /// Create a new `MiriGenmcShim`, which wraps a `GenMCDriver`.
    ///
    /// # Safety
    ///
    /// This function is marked as unsafe since the `logLevel` global variable is non-atomic.
    /// This function should not be called in an unsynchronized way with `set_log_level_raw`, since
    /// this function and any methods on the returned `MiriGenmcShim` may read the `logLevel`,
    /// causing a data race.
    /// The safest way to use these functions is to call `set_log_level_raw` once, and only then
    /// start creating handles.
    /// There should not be any other (safe) way to create a `MiriGenmcShim`.
    #[Self = "MiriGenmcShim"]
    unsafe fn create_handle(params: &GenmcParams) -> UniquePtr<MiriGenmcShim>;

    /// Get the bit mask that GenMC expects for global memory allocations.
    fn get_global_alloc_static_mask() -> u64;

    /// This function must be called at the start of any execution, before any events are reported to GenMC.
    fn handle_execution_start(self: Pin<&mut MiriGenmcShim>);
    /// This function must be called at the end of any execution, even if an error was found during the execution.
    /// Returns `null`, or a string containing an error message if an error occurred.
    fn handle_execution_end(self: Pin<&mut MiriGenmcShim>) -> UniquePtr<CxxString>;

    /***** Functions for handling events encountered during program execution. *****/

    /**** Memory access handling ****/
    fn handle_load(
        self: Pin<&mut MiriGenmcShim>,
        thread_id: i32,
        address: u64,
        size: u64,
        memory_ordering: MemOrdering,
        old_value: GenmcScalar,
    ) -> LoadResult;
    fn handle_store(
        self: Pin<&mut MiriGenmcShim>,
        thread_id: i32,
        address: u64,
        size: u64,
        value: GenmcScalar,
        old_value: GenmcScalar,
        memory_ordering: MemOrdering,
    ) -> StoreResult;

    /**** Memory (de)allocation ****/
    fn handle_malloc(
        self: Pin<&mut MiriGenmcShim>,
        thread_id: i32,
        size: u64,
        alignment: u64,
    ) -> u64;
    fn handle_free(self: Pin<&mut MiriGenmcShim>, thread_id: i32, address: u64);

    /**** Thread management ****/
    fn handle_thread_create(self: Pin<&mut MiriGenmcShim>, thread_id: i32, parent_id: i32);
    fn handle_thread_join(self: Pin<&mut MiriGenmcShim>, thread_id: i32, child_id: i32);
    fn handle_thread_finish(self: Pin<&mut MiriGenmcShim>, thread_id: i32, ret_val: u64);
    fn handle_thread_kill(self: Pin<&mut MiriGenmcShim>, thread_id: i32);

    /***** Exploration related functionality *****/

    /// Ask the GenMC scheduler for a new thread to schedule and
    /// return whether the execution is finished, blocked, or can continue.
    /// Updates the next instruction kind for the given thread id.
    fn schedule_next(
        self: Pin<&mut MiriGenmcShim>,
        curr_thread_id: i32,
        curr_thread_next_instr_kind: ActionKind,
    ) -> SchedulingResult;
    /// Check whether there are more executions to explore.
    /// If there are more executions, this method prepares for the next execution and returns `true`.
    fn is_exploration_done(self: Pin<&mut MiriGenmcShim>) -> bool;

    /**** Result querying functionality. ****/
    // NOTE: We don't want to share the `VerificationResult` type with the Rust side, since it
    // is very large, uses features that CXX.rs doesn't support and may change as GenMC changes.
    // Instead, we only use the result on the C++ side, and only expose these getter functions to
    // the Rust side.
    // Each `GenMCDriver` contains one `VerificationResult`, and each `MiriGenmcShim` contains one `GenMCDriver`.
    // GenMC builds up the content of the `struct VerificationResult` over the course of an exploration,
    // but it's safe to look at it at any point, since it is only accessible through exactly one `MiriGenmcShim`.
    // All these functions for querying the result can be safely called repeatedly and at any time,
    // though the results may be incomplete if called before `handle_execution_end`.

    /// Get the number of blocked executions encountered by GenMC (cast into a fixed-width integer).
    fn get_blocked_execution_count(self: &MiriGenmcShim) -> u64;
    /// Get the number of executions explored by GenMC (cast into a fixed-width integer).
    fn get_explored_execution_count(self: &MiriGenmcShim) -> u64;
    /// Get all messages that GenMC produced (errors, warnings), combined into one string.
    fn get_result_message(self: &MiriGenmcShim) -> UniquePtr<CxxString>;
    /// If an error occurred, return a string describing the error, otherwise, return `nullptr`.
    fn get_error_string(self: &MiriGenmcShim) -> UniquePtr<CxxString>;
}
}

View file

@ -1,50 +0,0 @@
#include "MiriInterface.hpp"
#include "genmc-sys/src/lib.rs.h"
auto MiriGenMCShim::createHandle(const GenmcParams &config)
-> std::unique_ptr<MiriGenMCShim>
{
auto conf = std::make_shared<Config>();
// Miri needs all threads to be replayed, even fully completed ones.
conf->replayCompletedThreads = true;
// We only support the RC11 memory model for Rust.
conf->model = ModelType::RC11;
conf->printRandomScheduleSeed = config.print_random_schedule_seed;
// FIXME(genmc): disable any options we don't support currently:
conf->ipr = false;
conf->disableBAM = true;
conf->instructionCaching = false;
ERROR_ON(config.do_symmetry_reduction, "Symmetry reduction is currently unsupported in GenMC mode.");
conf->symmetryReduction = config.do_symmetry_reduction;
// FIXME(genmc): Should there be a way to change this option from Miri?
conf->schedulePolicy = SchedulePolicy::WF;
// FIXME(genmc): implement estimation mode:
conf->estimate = false;
conf->estimationMax = 1000;
const auto mode = conf->estimate ? GenMCDriver::Mode(GenMCDriver::EstimationMode{})
: GenMCDriver::Mode(GenMCDriver::VerificationMode{});
// Running Miri-GenMC without race detection is not supported.
// Disabling this option also changes the behavior of the replay scheduler to only schedule at atomic operations, which is required with Miri.
// This happens because Miri can generate multiple GenMC events for a single MIR terminator. Without this option,
// the scheduler might incorrectly schedule an atomic MIR terminator because the first event it creates is a non-atomic (e.g., `StorageLive`).
conf->disableRaceDetection = false;
// Miri can already check for unfreed memory. Also, GenMC cannot distinguish between memory
// that is allowed to leak and memory that is not.
conf->warnUnfreedMemory = false;
// FIXME(genmc): check config:
// checkConfigOptions(*conf);
auto driver = std::make_unique<MiriGenMCShim>(std::move(conf), mode);
return driver;
}

View file

@ -1,44 +0,0 @@
#ifndef GENMC_MIRI_INTERFACE_HPP
#define GENMC_MIRI_INTERFACE_HPP
#include "rust/cxx.h"
#include "config.h"
#include "Config/Config.hpp"
#include "Verification/GenMCDriver.hpp"
#include <iostream>
/**** Types available to Miri ****/
// Config struct defined on the Rust side and translated to C++ by cxx.rs:
struct GenmcParams;
struct MiriGenMCShim : private GenMCDriver
{
public:
MiriGenMCShim(std::shared_ptr<const Config> conf, Mode mode /* = VerificationMode{} */)
: GenMCDriver(std::move(conf), nullptr, mode)
{
std::cerr << "C++: GenMC handle created!" << std::endl;
}
virtual ~MiriGenMCShim()
{
std::cerr << "C++: GenMC handle destroyed!" << std::endl;
}
static std::unique_ptr<MiriGenMCShim> createHandle(const GenmcParams &config);
};
/**** Functions available to Miri ****/
// NOTE: CXX doesn't support exposing static methods to Rust currently, so we expose this function instead.
static inline auto createGenmcHandle(const GenmcParams &config) -> std::unique_ptr<MiriGenMCShim>
{
return MiriGenMCShim::createHandle(config);
}
#endif /* GENMC_MIRI_INTERFACE_HPP */

View file

@ -0,0 +1,78 @@
use std::ops::Range;
use rand::Rng;
use rustc_abi::{Align, Size};
use rustc_const_eval::interpret::{InterpResult, interp_ok};
use rustc_middle::{err_exhaust, throw_exhaust};
/// Round `addr` up to the smallest multiple of `align` that is greater than or
/// equal to `addr`. Panics (via `strict_add`) if the rounded address would
/// overflow a `u64`.
fn align_addr(addr: u64, align: u64) -> u64 {
    let offset = addr % align;
    if offset == 0 {
        return addr;
    }
    addr.strict_add(align) - offset
}
/// This provides the logic to generate addresses for memory allocations in a given address range.
/// Constructed from a half-open range `start..end` (see `AddressGenerator::new`).
#[derive(Debug)]
pub struct AddressGenerator {
    /// This is used as a memory address when a new pointer is casted to an integer. It
    /// is always larger than any address that was previously made part of a block.
    next_base_addr: u64,
    /// This is the last address that can be allocated.
    /// (An allocation may end exactly here, but must start strictly below it.)
    end: u64,
}
impl AddressGenerator {
    /// Create a generator handing out addresses from the half-open `addr_range`.
    pub fn new(addr_range: Range<u64>) -> Self {
        Self { next_base_addr: addr_range.start, end: addr_range.end }
    }

    /// Get the remaining range where this `AddressGenerator` can still allocate addresses.
    pub fn get_remaining(&self) -> Range<u64> {
        self.next_base_addr..self.end
    }

    /// Generate a new address with the specified size and alignment, using the given Rng to add some randomness.
    /// The returned allocation is guaranteed not to overlap with any address ranges given out by the generator before.
    /// Returns an error if the allocation request cannot be fulfilled.
    ///
    /// # Errors
    /// Returns `AddressSpaceFull` if the new allocation would overflow `u64` or
    /// exceed the end of the configured range.
    pub fn generate<'tcx, R: Rng>(
        &mut self,
        size: Size,
        align: Align,
        rng: &mut R,
    ) -> InterpResult<'tcx, u64> {
        // Leave some space to the previous allocation, to give it some chance to be less aligned.
        // We ensure that `(self.next_base_addr + slack) % 16` is uniformly distributed.
        let slack = rng.random_range(0..16);
        // From next_base_addr + slack, round up to adjust for alignment.
        let base_addr =
            self.next_base_addr.checked_add(slack).ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
        let base_addr = align_addr(base_addr, align.bytes());
        // Remember next base address. If this allocation is zero-sized, leave a gap of at
        // least 1 to avoid two allocations having the same base address. (The logic in
        // `alloc_id_from_addr` assumes unique addresses, and different function/vtable pointers
        // need to be distinguishable!)
        self.next_base_addr = base_addr
            .checked_add(size.bytes().max(1))
            .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
        // Even if `Size` didn't overflow, we might still have filled up the address space.
        if self.next_base_addr > self.end {
            throw_exhaust!(AddressSpaceFull);
        }
        interp_ok(base_addr)
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_align_addr() {
assert_eq!(align_addr(37, 4), 40);
assert_eq!(align_addr(44, 4), 44);
}
}

View file

@ -1,15 +1,16 @@
//! This module is responsible for managing the absolute addresses that allocations are located at,
//! and for casting between pointers and integers based on those addresses.
mod address_generator;
mod reuse_pool;
use std::cell::RefCell;
use std::cmp::max;
use rand::Rng;
use rustc_abi::{Align, Size};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::ty::TyCtxt;
pub use self::address_generator::AddressGenerator;
use self::reuse_pool::ReusePool;
use crate::concurrency::VClock;
use crate::*;
@ -33,6 +34,8 @@ pub struct GlobalStateInner {
/// sorted by address. We cannot use a `HashMap` since we can be given an address that is offset
/// from the base address, and we need to find the `AllocId` it belongs to. This is not the
/// *full* inverse of `base_addr`; dead allocations have been removed.
/// Note that in GenMC mode, dead allocations are *not* removed -- and also, addresses are never
/// reused. This lets us use the address as a cross-execution-stable identifier for an allocation.
int_to_ptr_map: Vec<(u64, AllocId)>,
/// The base address for each allocation. We cannot put that into
/// `AllocExtra` because function pointers also have a base address, and
@ -49,9 +52,8 @@ pub struct GlobalStateInner {
/// Whether an allocation has been exposed or not. This cannot be put
/// into `AllocExtra` for the same reason as `base_addr`.
exposed: FxHashSet<AllocId>,
/// This is used as a memory address when a new pointer is casted to an integer. It
/// is always larger than any address that was previously made part of a block.
next_base_addr: u64,
/// The generator for new addresses in a given range.
address_generator: AddressGenerator,
/// The provenance to use for int2ptr casts
provenance_mode: ProvenanceMode,
}
@ -64,7 +66,7 @@ impl VisitProvenance for GlobalStateInner {
prepared_alloc_bytes: _,
reuse: _,
exposed: _,
next_base_addr: _,
address_generator: _,
provenance_mode: _,
} = self;
// Though base_addr, int_to_ptr_map, and exposed contain AllocIds, we do not want to visit them.
@ -77,14 +79,14 @@ impl VisitProvenance for GlobalStateInner {
}
impl GlobalStateInner {
pub fn new(config: &MiriConfig, stack_addr: u64) -> Self {
pub fn new<'tcx>(config: &MiriConfig, stack_addr: u64, tcx: TyCtxt<'tcx>) -> Self {
GlobalStateInner {
int_to_ptr_map: Vec::default(),
base_addr: FxHashMap::default(),
prepared_alloc_bytes: FxHashMap::default(),
reuse: ReusePool::new(config),
exposed: FxHashSet::default(),
next_base_addr: stack_addr,
address_generator: AddressGenerator::new(stack_addr..tcx.target_usize_max()),
provenance_mode: config.provenance_mode,
}
}
@ -96,15 +98,6 @@ impl GlobalStateInner {
}
}
/// Shifts `addr` to make it aligned with `align` by rounding `addr` to the smallest multiple
/// of `align` that is larger or equal to `addr`
fn align_addr(addr: u64, align: u64) -> u64 {
match addr % align {
0 => addr,
rem => addr.strict_add(align) - rem,
}
}
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn addr_from_alloc_id_uncached(
@ -132,7 +125,8 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Miri's address assignment leaks state across thread boundaries, which is incompatible
// with GenMC execution. So we instead let GenMC assign addresses to allocations.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
let addr = genmc_ctx.handle_alloc(&this.machine, info.size, info.align, memory_kind)?;
let addr =
genmc_ctx.handle_alloc(this, alloc_id, info.size, info.align, memory_kind)?;
return interp_ok(addr);
}
@ -189,39 +183,22 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.active_thread(),
) {
if let Some(clock) = clock {
this.acquire_clock(&clock);
this.acquire_clock(&clock)?;
}
interp_ok(reuse_addr)
} else {
// We have to pick a fresh address.
// Leave some space to the previous allocation, to give it some chance to be less aligned.
// We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
let slack = rng.random_range(0..16);
// From next_base_addr + slack, round up to adjust for alignment.
let base_addr = global_state
.next_base_addr
.checked_add(slack)
.ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
let base_addr = align_addr(base_addr, info.align.bytes());
let new_addr =
global_state.address_generator.generate(info.size, info.align, &mut rng)?;
// Remember next base address. If this allocation is zero-sized, leave a gap of at
// least 1 to avoid two allocations having the same base address. (The logic in
// `alloc_id_from_addr` assumes unique addresses, and different function/vtable pointers
// need to be distinguishable!)
global_state.next_base_addr = base_addr
.checked_add(max(info.size.bytes(), 1))
.ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
// Even if `Size` didn't overflow, we might still have filled up the address space.
if global_state.next_base_addr > this.target_usize_max() {
throw_exhaust!(AddressSpaceFull);
}
// If we filled up more than half the address space, start aggressively reusing
// addresses to avoid running out.
if global_state.next_base_addr > u64::try_from(this.target_isize_max()).unwrap() {
let remaining_range = global_state.address_generator.get_remaining();
if remaining_range.start > remaining_range.end / 2 {
global_state.reuse.address_space_shortage();
}
interp_ok(base_addr)
interp_ok(new_addr)
}
}
}
@ -268,7 +245,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// We only use this provenance if it has been exposed, or if the caller requested also non-exposed allocations
if !only_exposed_allocations || global_state.exposed.contains(&alloc_id) {
// This must still be live, since we remove allocations from `int_to_ptr_map` when they get freed.
debug_assert!(this.is_alloc_live(alloc_id));
// In GenMC mode, we keep all allocations, so this check doesn't apply there.
if this.machine.data_race.as_genmc_ref().is_none() {
debug_assert!(this.is_alloc_live(alloc_id));
}
Some(alloc_id)
} else {
None
@ -485,6 +465,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
impl<'tcx> MiriMachine<'tcx> {
pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) {
// In GenMC mode, we can't remove dead allocation info since such pointers can
// still be stored in atomics and we need this info to convert GenMC pointers to Miri pointers.
// `global_state.reuse` is also unused so we can just skip this entire function.
if self.data_race.as_genmc_ref().is_some() {
return;
}
let global_state = self.alloc_addresses.get_mut();
let rng = self.rng.get_mut();
@ -519,14 +506,3 @@ impl<'tcx> MiriMachine<'tcx> {
})
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_align_addr() {
assert_eq!(align_addr(37, 4), 40);
assert_eq!(align_addr(44, 4), 44);
}
}

View file

@ -39,7 +39,7 @@ use std::sync::atomic::{AtomicI32, AtomicU32, Ordering};
use miri::{
BacktraceStyle, BorrowTrackerMethod, GenmcConfig, GenmcCtx, MiriConfig, MiriEntryFnType,
ProvenanceMode, RetagFields, TreeBorrowsParams, ValidationMode,
ProvenanceMode, RetagFields, TreeBorrowsParams, ValidationMode, run_genmc_mode,
};
use rustc_abi::ExternAbi;
use rustc_data_structures::sync;
@ -187,9 +187,18 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
}
if let Some(_genmc_config) = &config.genmc_config {
let _genmc_ctx = Rc::new(GenmcCtx::new(&config));
let eval_entry_once = |genmc_ctx: Rc<GenmcCtx>| {
miri::eval_entry(tcx, entry_def_id, entry_type, &config, Some(genmc_ctx))
};
todo!("GenMC mode not yet implemented");
// FIXME(genmc): add estimation mode here.
let return_code = run_genmc_mode(&config, eval_entry_once, tcx).unwrap_or_else(|| {
tcx.dcx().abort_if_errors();
rustc_driver::EXIT_FAILURE
});
exit(return_code);
};
if let Some(many_seeds) = self.many_seeds.take() {
@ -739,9 +748,11 @@ fn main() {
// Validate settings for data race detection and GenMC mode.
if miri_config.genmc_config.is_some() {
if !miri_config.data_race_detector {
fatal_error!("Cannot disable data race detection in GenMC mode (currently)");
fatal_error!("Cannot disable data race detection in GenMC mode");
} else if !miri_config.weak_memory_emulation {
fatal_error!("Cannot disable weak memory emulation in GenMC mode");
} else if !miri_config.native_lib.is_empty() {
fatal_error!("native-lib not supported in GenMC mode.");
}
if miri_config.borrow_tracker.is_some() {
eprintln!(
@ -749,6 +760,9 @@ fn main() {
);
miri_config.borrow_tracker = None;
}
// We enable fixed scheduling so Miri doesn't randomly yield before a terminator, which anyway
// would be a NOP in GenMC mode.
miri_config.fixed_scheduling = true;
} else if miri_config.weak_memory_emulation && !miri_config.data_race_detector {
fatal_error!(
"Weak memory emulation cannot be enabled when the data race detector is disabled"

View file

@ -740,6 +740,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
if let Some(access) = access {
assert_eq!(access, AccessKind::Write);
// Make sure the data race model also knows about this.
// FIXME(genmc): Ensure this is still done in GenMC mode. Check for other places where GenMC may need to be informed.
if let Some(data_race) = alloc_extra.data_race.as_vclocks_mut() {
data_race.write(
alloc_id,

View file

@ -719,8 +719,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
// Only metadata on the location itself is used.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
// FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
let old_val = None;
let old_val = this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err();
return genmc_ctx.atomic_load(
this,
place.ptr().addr(),
@ -751,11 +750,22 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
// The program didn't actually do a read, so suppress the memory access hooks.
// This is also a very special exception where we just ignore an error -- if this read
// was UB e.g. because the memory is uninitialized, we don't want to know!
let old_val = this.run_for_validation_mut(|this| this.read_scalar(dest)).discard_err();
let old_val = this.run_for_validation_ref(|this| this.read_scalar(dest)).discard_err();
// Inform GenMC about the atomic store.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
// FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
genmc_ctx.atomic_store(this, dest.ptr().addr(), dest.layout.size, val, atomic)?;
if genmc_ctx.atomic_store(
this,
dest.ptr().addr(),
dest.layout.size,
val,
old_val,
atomic,
)? {
// The store might be the latest store in coherence order (determined by GenMC).
// If it is, we need to update the value in Miri's memory:
this.allow_data_races_mut(|this| this.write_scalar(val, dest))?;
}
return interp_ok(());
}
this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
@ -779,7 +789,6 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
// Inform GenMC about the atomic rmw operation.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
// FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
let (old_val, new_val) = genmc_ctx.atomic_rmw_op(
this,
place.ptr().addr(),
@ -787,8 +796,11 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
atomic,
(op, not),
rhs.to_scalar(),
old.to_scalar(),
)?;
this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
if let Some(new_val) = new_val {
this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
}
return interp_ok(ImmTy::from_scalar(old_val, old.layout));
}
@ -818,14 +830,19 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
// Inform GenMC about the atomic atomic exchange.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
// FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
let (old_val, _is_success) = genmc_ctx.atomic_exchange(
let (old_val, new_val) = genmc_ctx.atomic_exchange(
this,
place.ptr().addr(),
place.layout.size,
new,
atomic,
old,
)?;
// The store might be the latest store in coherence order (determined by GenMC).
// If it is, we need to update the value in Miri's memory:
if let Some(new_val) = new_val {
this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
}
return interp_ok(old_val);
}
@ -851,7 +868,6 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
// Inform GenMC about the atomic min/max operation.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
// FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
let (old_val, new_val) = genmc_ctx.atomic_min_max_op(
this,
place.ptr().addr(),
@ -860,8 +876,13 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
min,
old.layout.backend_repr.is_signed(),
rhs.to_scalar(),
old.to_scalar(),
)?;
this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
// The store might be the latest store in coherence order (determined by GenMC).
// If it is, we need to update the value in Miri's memory:
if let Some(new_val) = new_val {
this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
}
return interp_ok(ImmTy::from_scalar(old_val, old.layout));
}
@ -903,15 +924,12 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
this.atomic_access_check(place, AtomicAccessType::Rmw)?;
// Failure ordering cannot be stronger than success ordering, therefore first attempt
// to read with the failure ordering and if successful then try again with the success
// read ordering and write in the success case.
// Read as immediate for the sake of `binary_op()`
let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
// Inform GenMC about the atomic atomic compare exchange.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
let (old, cmpxchg_success) = genmc_ctx.atomic_compare_exchange(
let (old_value, new_value, cmpxchg_success) = genmc_ctx.atomic_compare_exchange(
this,
place.ptr().addr(),
place.layout.size,
@ -920,11 +938,15 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
success,
fail,
can_fail_spuriously,
old.to_scalar(),
)?;
if cmpxchg_success {
this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
// The store might be the latest store in coherence order (determined by GenMC).
// If it is, we need to update the value in Miri's memory:
if let Some(new_value) = new_value {
this.allow_data_races_mut(|this| this.write_scalar(new_value, place))?;
}
return interp_ok(Immediate::ScalarPair(old, Scalar::from_bool(cmpxchg_success)));
return interp_ok(Immediate::ScalarPair(old_value, Scalar::from_bool(cmpxchg_success)));
}
// `binary_op` will bail if either of them is not a scalar.
@ -985,11 +1007,16 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
/// Acquire the given clock into the current thread, establishing synchronization with
/// the moment when that clock snapshot was taken via `release_clock`.
fn acquire_clock(&self, clock: &VClock) {
fn acquire_clock(&self, clock: &VClock) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
data_race.acquire_clock(clock, &this.machine.threads);
match &this.machine.data_race {
GlobalDataRaceHandler::None => {}
GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
throw_unsup_format!("acquire_clock is not (yet) supported in GenMC mode."),
GlobalDataRaceHandler::Vclocks(data_race) =>
data_race.acquire_clock(clock, &this.machine.threads),
}
interp_ok(())
}
}

View file

@ -1,13 +1,14 @@
use super::GenmcParams;
use genmc_sys::{GenmcParams, LogLevel};
/// Configuration for GenMC mode.
/// The `params` field is shared with the C++ side.
/// The remaining options are kept on the Rust side.
#[derive(Debug, Default, Clone)]
pub struct GenmcConfig {
/// Parameters sent to the C++ side to create a new handle to the GenMC model checker.
pub(super) params: GenmcParams,
do_estimation: bool,
// FIXME(GenMC): add remaining options.
/// The log level for GenMC.
pub(super) log_level: LogLevel,
}
impl GenmcConfig {
@ -29,7 +30,15 @@ impl GenmcConfig {
if trimmed_arg.is_empty() {
return Ok(()); // this corresponds to "-Zmiri-genmc"
}
// FIXME(GenMC): implement remaining parameters.
todo!();
let genmc_config = genmc_config.as_mut().unwrap();
let Some(trimmed_arg) = trimmed_arg.strip_prefix("-") else {
return Err(format!("Invalid GenMC argument \"-Zmiri-genmc{trimmed_arg}\""));
};
if let Some(log_level) = trimmed_arg.strip_prefix("log=") {
genmc_config.log_level = log_level.parse()?;
} else {
return Err(format!("Invalid GenMC argument: \"-Zmiri-genmc-{trimmed_arg}\""));
}
Ok(())
}
}

View file

@ -1,30 +1,49 @@
#![allow(unused)]
use rustc_abi::{Align, Size};
use rustc_const_eval::interpret::{InterpCx, InterpResult};
use rustc_const_eval::interpret::{AllocId, InterpCx, InterpResult};
use rustc_middle::mir;
pub use self::run::run_genmc_mode;
use crate::{
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriConfig,
MiriMachine, Scalar, ThreadId, ThreadManager, VisitProvenance, VisitWith,
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriMachine, Scalar,
ThreadId, ThreadManager, VisitProvenance, VisitWith,
};
/// The way a program's execution ended.
#[derive(Clone, Copy, Debug)]
pub enum ExitType {
    /// The main thread finished execution normally.
    MainThreadFinish,
    /// The program explicitly called `exit`.
    ExitCalled,
}
#[derive(Debug)]
pub struct GenmcCtx {}
#[derive(Debug, Default, Clone)]
pub struct GenmcConfig {}
mod run {
    use std::rc::Rc;
    use rustc_middle::ty::TyCtxt;
    use crate::{GenmcCtx, MiriConfig};
    /// Entry point for GenMC mode in the dummy module.
    /// This stub is never executed (`unreachable!`): when GenMC support is not compiled in,
    /// argument parsing rejects `-Zmiri-genmc` before this could be reached — TODO confirm
    /// against the driver code.
    pub fn run_genmc_mode<'tcx>(
        _config: &MiriConfig,
        _eval_entry: impl Fn(Rc<GenmcCtx>) -> Option<i32>,
        _tcx: TyCtxt<'tcx>,
    ) -> Option<i32> {
        unreachable!();
    }
}
impl GenmcCtx {
pub fn new(_miri_config: &MiriConfig) -> Self {
// We don't provide the `new` function in the dummy module.
pub fn get_blocked_execution_count(&self) -> usize {
unreachable!()
}
pub fn get_stuck_execution_count(&self) -> usize {
unreachable!()
}
pub fn print_genmc_graph(&self) {
pub fn get_explored_execution_count(&self) -> usize {
unreachable!()
}
@ -34,17 +53,6 @@ impl GenmcCtx {
/**** Memory access handling ****/
pub(crate) fn handle_execution_start(&self) {
unreachable!()
}
pub(crate) fn handle_execution_end<'tcx>(
&self,
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
) -> Result<(), String> {
unreachable!()
}
pub(super) fn set_ongoing_action_data_race_free(&self, _enable: bool) {
unreachable!()
}
@ -67,8 +75,9 @@ impl GenmcCtx {
_address: Size,
_size: Size,
_value: Scalar,
_old_value: Option<Scalar>,
_ordering: AtomicWriteOrd,
) -> InterpResult<'tcx, ()> {
) -> InterpResult<'tcx, bool> {
unreachable!()
}
@ -76,7 +85,7 @@ impl GenmcCtx {
&self,
_machine: &MiriMachine<'tcx>,
_ordering: AtomicFenceOrd,
) -> InterpResult<'tcx, ()> {
) -> InterpResult<'tcx> {
unreachable!()
}
@ -86,22 +95,24 @@ impl GenmcCtx {
_address: Size,
_size: Size,
_ordering: AtomicRwOrd,
(rmw_op, not): (mir::BinOp, bool),
(_rmw_op, _not): (mir::BinOp, bool),
_rhs_scalar: Scalar,
) -> InterpResult<'tcx, (Scalar, Scalar)> {
_old_value: Scalar,
) -> InterpResult<'tcx, (Scalar, Option<Scalar>)> {
unreachable!()
}
pub(crate) fn atomic_min_max_op<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
address: Size,
size: Size,
ordering: AtomicRwOrd,
min: bool,
is_signed: bool,
rhs_scalar: Scalar,
) -> InterpResult<'tcx, (Scalar, Scalar)> {
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
_address: Size,
_size: Size,
_ordering: AtomicRwOrd,
_min: bool,
_is_signed: bool,
_rhs_scalar: Scalar,
_old_value: Scalar,
) -> InterpResult<'tcx, (Scalar, Option<Scalar>)> {
unreachable!()
}
@ -112,7 +123,8 @@ impl GenmcCtx {
_size: Size,
_rhs_scalar: Scalar,
_ordering: AtomicRwOrd,
) -> InterpResult<'tcx, (Scalar, bool)> {
_old_value: Scalar,
) -> InterpResult<'tcx, (Scalar, Option<Scalar>)> {
unreachable!()
}
@ -126,7 +138,8 @@ impl GenmcCtx {
_success: AtomicRwOrd,
_fail: AtomicReadOrd,
_can_fail_spuriously: bool,
) -> InterpResult<'tcx, (Scalar, bool)> {
_old_value: Scalar,
) -> InterpResult<'tcx, (Scalar, Option<Scalar>, bool)> {
unreachable!()
}
@ -135,7 +148,7 @@ impl GenmcCtx {
_machine: &MiriMachine<'tcx>,
_address: Size,
_size: Size,
) -> InterpResult<'tcx, ()> {
) -> InterpResult<'tcx> {
unreachable!()
}
@ -144,7 +157,7 @@ impl GenmcCtx {
_machine: &MiriMachine<'tcx>,
_address: Size,
_size: Size,
) -> InterpResult<'tcx, ()> {
) -> InterpResult<'tcx> {
unreachable!()
}
@ -152,7 +165,8 @@ impl GenmcCtx {
pub(crate) fn handle_alloc<'tcx>(
&self,
_machine: &MiriMachine<'tcx>,
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
_alloc_id: AllocId,
_size: Size,
_alignment: Align,
_memory_kind: MemoryKind,
@ -163,11 +177,10 @@ impl GenmcCtx {
pub(crate) fn handle_dealloc<'tcx>(
&self,
_machine: &MiriMachine<'tcx>,
_alloc_id: AllocId,
_address: Size,
_size: Size,
_align: Align,
_kind: MemoryKind,
) -> InterpResult<'tcx, ()> {
) -> InterpResult<'tcx> {
unreachable!()
}
@ -176,8 +189,10 @@ impl GenmcCtx {
pub(crate) fn handle_thread_create<'tcx>(
&self,
_threads: &ThreadManager<'tcx>,
_start_routine: crate::Pointer,
_func_arg: &crate::ImmTy<'tcx>,
_new_thread_id: ThreadId,
) -> InterpResult<'tcx, ()> {
) -> InterpResult<'tcx> {
unreachable!()
}
@ -185,24 +200,26 @@ impl GenmcCtx {
&self,
_active_thread_id: ThreadId,
_child_thread_id: ThreadId,
) -> InterpResult<'tcx, ()> {
) -> InterpResult<'tcx> {
unreachable!()
}
pub(crate) fn handle_thread_stack_empty(&self, _thread_id: ThreadId) {
pub(crate) fn handle_thread_finish<'tcx>(&self, _threads: &ThreadManager<'tcx>) {
unreachable!()
}
pub(crate) fn handle_thread_finish<'tcx>(
pub(crate) fn handle_exit<'tcx>(
&self,
_threads: &ThreadManager<'tcx>,
) -> InterpResult<'tcx, ()> {
_thread: ThreadId,
_exit_code: i32,
_exit_type: ExitType,
) -> InterpResult<'tcx> {
unreachable!()
}
/**** Scheduling functionality ****/
pub(crate) fn schedule_thread<'tcx>(
pub fn schedule_thread<'tcx>(
&self,
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
) -> InterpResult<'tcx, ThreadId> {
@ -211,6 +228,7 @@ impl GenmcCtx {
/**** Blocking instructions ****/
#[allow(unused)]
pub(crate) fn handle_verifier_assume<'tcx>(
&self,
_machine: &MiriMachine<'tcx>,
@ -229,7 +247,7 @@ impl VisitProvenance for GenmcCtx {
impl GenmcConfig {
pub fn parse_arg(
_genmc_config: &mut Option<GenmcConfig>,
trimmed_arg: &str,
_trimmed_arg: &str,
) -> Result<(), String> {
if cfg!(feature = "genmc") {
Err(format!("GenMC is disabled in this build of Miri"))
@ -237,8 +255,4 @@ impl GenmcConfig {
Err(format!("GenMC is not supported on this target"))
}
}
pub fn should_print_graph(&self, _rep: usize) -> bool {
unreachable!()
}
}

View file

@ -0,0 +1,118 @@
use std::collections::hash_map::Entry;
use std::sync::RwLock;
use genmc_sys::{GENMC_GLOBAL_ADDRESSES_MASK, get_global_alloc_static_mask};
use rand::SeedableRng;
use rand::rngs::StdRng;
use rustc_const_eval::interpret::{AllocId, AllocInfo, InterpResult, interp_ok};
use rustc_data_structures::fx::FxHashMap;
use tracing::debug;
use crate::alloc_addresses::AddressGenerator;
/// Interior state of [`GlobalAllocationHandler`], protected by its `RwLock`.
#[derive(Debug)]
struct GlobalStateInner {
    /// The base address for each *global* allocation.
    base_addr: FxHashMap<AllocId, u64>,
    /// We use the same address generator that Miri uses in normal operation.
    address_generator: AddressGenerator,
    /// The address generator needs an Rng to randomize the offsets between allocations.
    /// We don't use the `MiriMachine` Rng since this is global, cross-machine state.
    rng: StdRng,
}
/// Allocator for global memory in GenMC mode.
/// Miri doesn't discover all global allocations statically like LLI does for GenMC.
/// The existing global memory allocator in GenMC doesn't support this, so we take over these allocations.
/// Global allocations need to be in a specific address range, with the lower limit given by the `GENMC_GLOBAL_ADDRESSES_MASK` constant.
///
/// Every global allocation must have the same addresses across all executions of a single program.
/// Therefore there is only 1 global allocator, and it syncs new globals across executions, even if they are explored in parallel.
#[derive(Debug)]
pub struct GlobalAllocationHandler(RwLock<GlobalStateInner>);
impl GlobalAllocationHandler {
    /// Create a new global address generator with a given max address `last_addr`
    /// (corresponding to the highest address available on the target platform, unless another limit exists).
    /// No addresses higher than this will be allocated.
    /// Will panic if the given address limit is too small to allocate any addresses.
    pub fn new(last_addr: u64) -> GlobalAllocationHandler {
        // Sanity check: the Rust-side constant must agree with the mask reported by GenMC.
        assert_eq!(GENMC_GLOBAL_ADDRESSES_MASK, get_global_alloc_static_mask());
        assert_ne!(GENMC_GLOBAL_ADDRESSES_MASK, 0);
        // FIXME(genmc): Remove if non-64bit targets are supported.
        assert!(
            GENMC_GLOBAL_ADDRESSES_MASK < last_addr,
            "only 64bit platforms are currently supported (highest address {last_addr:#x} <= minimum global address {GENMC_GLOBAL_ADDRESSES_MASK:#x})."
        );
        Self(RwLock::new(GlobalStateInner {
            base_addr: FxHashMap::default(),
            address_generator: AddressGenerator::new(GENMC_GLOBAL_ADDRESSES_MASK..last_addr),
            // FIXME(genmc): We could provide a way to change this seed, to allow for different global addresses.
            rng: StdRng::seed_from_u64(0),
        }))
    }
}
impl GlobalStateInner {
    /// Assign a base address to `alloc_id`, or return the one assigned earlier.
    /// Takes `&mut self`, i.e. the caller must hold the write lock of the enclosing `RwLock`.
    fn global_allocate_addr<'tcx>(
        &mut self,
        alloc_id: AllocId,
        info: AllocInfo,
    ) -> InterpResult<'tcx, u64> {
        let entry = match self.base_addr.entry(alloc_id) {
            Entry::Occupied(occupied_entry) => {
                // Looks like some other thread allocated this for us
                // between when we released the read lock and acquired the write lock,
                // so we just return that value.
                return interp_ok(*occupied_entry.get());
            }
            Entry::Vacant(vacant_entry) => vacant_entry,
        };
        // This allocation does not have a base address yet, pick or reuse one.
        // We are not in native lib mode (incompatible with GenMC mode), so we control the addresses ourselves.
        let new_addr = self.address_generator.generate(info.size, info.align, &mut self.rng)?;
        // Cache the address for future use.
        entry.insert(new_addr);
        interp_ok(new_addr)
    }
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Allocate a new address for the given alloc id, or return the cached address.
    /// Each alloc id is assigned one unique allocation which will not change if this function is called again with the same alloc id.
    fn get_global_allocation_address(
        &self,
        global_allocation_handler: &GlobalAllocationHandler,
        alloc_id: AllocId,
    ) -> InterpResult<'tcx, u64> {
        let this = self.eval_context_ref();
        let info = this.get_alloc_info(alloc_id);
        // Fast path: take only the read lock and check whether the address is already cached.
        let global_state = global_allocation_handler.0.read().unwrap();
        if let Some(base_addr) = global_state.base_addr.get(&alloc_id) {
            debug!(
                "GenMC: address for global with alloc id {alloc_id:?} was cached: {base_addr} == {base_addr:#x}"
            );
            return interp_ok(*base_addr);
        }
        // We need to upgrade to a write lock. `std::sync::RwLock` doesn't support this, so we drop the guard and lock again.
        // Note that another thread might allocate the address while the `RwLock` is unlocked, but we handle this case in the allocation function.
        drop(global_state);
        let mut global_state = global_allocation_handler.0.write().unwrap();
        // With the write lock, we can safely allocate an address only once per `alloc_id`.
        let new_addr = global_state.global_allocate_addr(alloc_id, info)?;
        debug!("GenMC: global with alloc id {alloc_id:?} got address: {new_addr} == {new_addr:#x}");
        // Every address handed out here must lie in GenMC's global address space.
        assert_eq!(
            GENMC_GLOBAL_ADDRESSES_MASK,
            new_addr & GENMC_GLOBAL_ADDRESSES_MASK,
            "Global address allocated outside global address space."
        );
        interp_ok(new_addr)
    }
}

View file

@ -0,0 +1,97 @@
use genmc_sys::MemOrdering;
use rustc_abi::Size;
use rustc_const_eval::interpret::{InterpResult, interp_ok};
use rustc_middle::ty::ScalarInt;
use tracing::debug;
use super::GenmcScalar;
use crate::{AtomicReadOrd, AtomicWriteOrd, MiriInterpCx, Scalar, throw_unsup_format};
/// Maximum size memory access in bytes that GenMC supports.
pub(super) const MAX_ACCESS_SIZE: u64 = 8;
/// Split a large memory access into aligned, non-overlapping chunks of at most
/// `MAX_ACCESS_SIZE` bytes each.
/// Returns an iterator yielding `(base address, size)` for every chunk, in increasing
/// address order.
pub fn split_access(address: Size, size: Size) -> impl Iterator<Item = (u64, u64)> {
    let begin = address.bytes();
    let end = begin + size.bytes();
    // First `MAX_ACCESS_SIZE`-aligned address at or above `begin`.
    let begin_aligned = begin.next_multiple_of(MAX_ACCESS_SIZE);
    // Last `MAX_ACCESS_SIZE`-aligned address at or below `end` ("previous multiple of").
    // NOTE(review): if the whole access lies strictly inside one aligned chunk,
    // `begin_aligned > end_aligned` and the subtraction below would underflow — presumably
    // callers only split accesses that span an alignment boundary; confirm at call sites.
    let end_aligned = (end / MAX_ACCESS_SIZE) * MAX_ACCESS_SIZE;
    debug!(
        "GenMC: splitting NA memory access into {MAX_ACCESS_SIZE} byte chunks: {}B + {} * {MAX_ACCESS_SIZE}B + {}B = {size:?}",
        begin_aligned - begin,
        (end_aligned - begin_aligned) / MAX_ACCESS_SIZE,
        end - end_aligned,
    );
    // FIXME(genmc): could make remaining accesses powers-of-2, instead of 1 byte.
    let head = (begin..begin_aligned).map(|addr| (addr, 1));
    let body = (begin_aligned..end_aligned)
        .step_by(MAX_ACCESS_SIZE.try_into().unwrap())
        .map(|addr| (addr, MAX_ACCESS_SIZE));
    let tail = (end_aligned..end).map(|addr| (addr, 1));
    head.chain(body).chain(tail)
}
/// Inverse function to `genmc_scalar_to_scalar`.
///
/// Convert a Miri `Scalar` to a `GenmcScalar`.
/// To be able to restore pointer provenance from a `GenmcScalar`, the base address of the allocation of the pointer is also stored in the `GenmcScalar`.
/// We cannot use the `AllocId` instead of the base address, since Miri has no control over the `AllocId`, and it may change across executions.
/// Pointers with `Wildcard` provenance are not supported.
pub fn scalar_to_genmc_scalar<'tcx>(
    _ecx: &MiriInterpCx<'tcx>,
    scalar: Scalar,
) -> InterpResult<'tcx, GenmcScalar> {
    interp_ok(match scalar {
        rustc_const_eval::interpret::Scalar::Int(scalar_int) => {
            // FIXME(genmc): Add u128 support once GenMC supports it.
            let value: u64 = scalar_int.to_uint(scalar_int.size()).try_into().unwrap();
            GenmcScalar { value, is_init: true }
        }
        rustc_const_eval::interpret::Scalar::Ptr(_pointer, _size) =>
            throw_unsup_format!(
                "FIXME(genmc): Implement sending pointers (with provenance) to GenMC."
            ),
    })
}
/// Inverse function to `scalar_to_genmc_scalar`.
///
/// Convert a `GenmcScalar` back into a Miri `Scalar`.
/// For pointers, attempt to convert the stored base address of their allocation back into an `AllocId`.
pub fn genmc_scalar_to_scalar<'tcx>(
    _ecx: &MiriInterpCx<'tcx>,
    scalar: GenmcScalar,
    size: Size,
) -> InterpResult<'tcx, Scalar> {
    // FIXME(genmc): Add GenmcScalar to Miri Pointer conversion.
    // GenMC always hands back 64-bit values; truncate down to the requested `size`,
    // discarding any extra high bits.
    // FIXME(genmc): GenMC should be doing the truncation, not Miri.
    let truncated = ScalarInt::truncate_from_uint(scalar.value, size).0;
    interp_ok(Scalar::Int(truncated))
}
impl AtomicReadOrd {
    /// Translate Miri's atomic read ordering into GenMC's `MemOrdering`.
    pub(super) fn to_genmc(self) -> MemOrdering {
        match self {
            Self::Relaxed => MemOrdering::Relaxed,
            Self::Acquire => MemOrdering::Acquire,
            Self::SeqCst => MemOrdering::SequentiallyConsistent,
        }
    }
}
impl AtomicWriteOrd {
    /// Translate Miri's atomic write ordering into GenMC's `MemOrdering`.
    pub(super) fn to_genmc(self) -> MemOrdering {
        match self {
            Self::Relaxed => MemOrdering::Relaxed,
            Self::Release => MemOrdering::Release,
            Self::SeqCst => MemOrdering::SequentiallyConsistent,
        }
    }
}

View file

@ -1,78 +1,171 @@
#![allow(unused)] // FIXME(GenMC): remove this
use std::cell::{Cell, RefCell};
use std::sync::Arc;
use std::cell::Cell;
use genmc_sys::{GenmcParams, createGenmcHandle};
use genmc_sys::{
GENMC_GLOBAL_ADDRESSES_MASK, GenmcScalar, MemOrdering, MiriGenmcShim, UniquePtr,
create_genmc_driver_handle,
};
use rustc_abi::{Align, Size};
use rustc_const_eval::interpret::{InterpCx, InterpResult, interp_ok};
use rustc_middle::mir;
use rustc_const_eval::interpret::{AllocId, InterpCx, InterpResult, interp_ok};
use rustc_middle::{mir, throw_machine_stop, throw_ub_format, throw_unsup_format};
// FIXME(genmc,tracing): Implement some work-around for enabling debug/trace level logging (currently disabled statically in rustc).
use tracing::{debug, info};
use self::global_allocations::{EvalContextExt as _, GlobalAllocationHandler};
use self::helper::{MAX_ACCESS_SIZE, genmc_scalar_to_scalar, scalar_to_genmc_scalar};
use self::thread_id_map::ThreadIdMap;
use crate::concurrency::genmc::helper::split_access;
use crate::{
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriConfig,
MiriMachine, Scalar, ThreadId, ThreadManager, VisitProvenance, VisitWith,
MiriMachine, MiriMemoryKind, Scalar, TerminationInfo, ThreadId, ThreadManager, VisitProvenance,
VisitWith,
};
mod config;
mod global_allocations;
mod helper;
mod run;
pub(crate) mod scheduling;
mod thread_id_map;
pub use self::config::GenmcConfig;
pub use self::run::run_genmc_mode;
// FIXME(GenMC): add fields
pub struct GenmcCtx {
/// Some actions Miri does are allowed to cause data races.
/// GenMC will not be informed about certain actions (e.g. non-atomic loads) when this flag is set.
allow_data_races: Cell<bool>,
/// The way a program's execution ended.
#[derive(Clone, Copy, Debug)]
pub enum ExitType {
    /// The main thread finished execution normally.
    MainThreadFinish,
    /// The program explicitly called `exit`.
    ExitCalled,
}
/// The exit status of a program.
/// GenMC must store this if a thread exits while any others can still run.
/// The other threads must also be explored before the program is terminated.
#[derive(Clone, Copy, Debug)]
struct ExitStatus {
    // The process exit code.
    exit_code: i32,
    // How the program exited (normal main-thread return vs. explicit `exit` call).
    exit_type: ExitType,
}
impl ExitStatus {
    /// Whether a leak check should be run for this exit:
    /// only when the main thread finished normally, not for an explicit `exit` call.
    fn do_leak_check(self) -> bool {
        matches!(self.exit_type, ExitType::MainThreadFinish)
    }
}
/// State that is reset at the start of every execution.
#[derive(Debug, Default)]
struct PerExecutionState {
    /// Thread id management, such as mapping between Miri `ThreadId` and GenMC's thread ids, or selecting GenMC thread ids.
    thread_id_manager: RefCell<ThreadIdMap>,
    /// A flag to indicate that we should not forward non-atomic accesses to genmc, e.g. because we
    /// are executing an atomic operation.
    allow_data_races: Cell<bool>,
    /// The exit status of the program. We keep running other threads even after `exit` to ensure
    /// we cover all possible executions.
    /// `None` if no thread has called `exit` and the main thread isn't finished yet.
    exit_status: Cell<Option<ExitStatus>>,
}
impl PerExecutionState {
    /// Reset all per-execution state back to its initial values, in preparation for
    /// exploring the next execution.
    fn reset(&self) {
        self.allow_data_races.replace(false);
        self.thread_id_manager.borrow_mut().reset();
        self.exit_status.set(None);
    }
}
#[derive(Debug)]
struct GlobalState {
    /// Keep track of global allocations, to ensure they keep the same address across different executions, even if the order of allocations changes.
    /// The `AllocId` for globals is stable across executions, so we can use it as an identifier.
    global_allocations: GlobalAllocationHandler,
}
impl GlobalState {
    /// Create the state shared by all `GenmcCtx` of one run.
    /// `target_usize_max` is the highest usable address on the target; global allocations
    /// are placed below it.
    fn new(target_usize_max: u64) -> Self {
        Self { global_allocations: GlobalAllocationHandler::new(target_usize_max) }
    }
}
/// The main interface with GenMC.
/// Each `GenmcCtx` owns one `MiriGenmcShim`, which owns one `GenMCDriver` (the GenMC model checker).
/// For each GenMC run (estimation or verification), one or more `GenmcCtx` can be created (one per Miri thread).
/// However, for now, we only ever have one `GenmcCtx` per run.
///
/// In multithreading, each worker thread has its own `GenmcCtx`, which will have their results combined in the end.
/// FIXME(genmc): implement multithreading.
///
/// Some data is shared across all `GenmcCtx` in the same run, namely data for global allocation handling.
/// Globals must be allocated in a consistent manner, i.e., each global allocation must have the same address in each execution.
///
/// Some state is reset between each execution in the same run.
pub struct GenmcCtx {
/// Handle to the GenMC model checker.
handle: RefCell<UniquePtr<MiriGenmcShim>>,
/// State that is reset at the start of every execution.
exec_state: PerExecutionState,
/// State that persists across executions.
/// All `GenmcCtx` in one verification step share this state.
global_state: Arc<GlobalState>,
}
/// GenMC Context creation and administrative / query actions
impl GenmcCtx {
/// Create a new `GenmcCtx` from a given config.
pub fn new(miri_config: &MiriConfig) -> Self {
fn new(miri_config: &MiriConfig, global_state: Arc<GlobalState>) -> Self {
let genmc_config = miri_config.genmc_config.as_ref().unwrap();
let handle = createGenmcHandle(&genmc_config.params);
assert!(!handle.is_null());
eprintln!("Miri: GenMC handle creation successful!");
drop(handle);
eprintln!("Miri: Dropping GenMC handle successful!");
// FIXME(GenMC): implement
std::process::exit(0);
let handle =
RefCell::new(create_genmc_driver_handle(&genmc_config.params, genmc_config.log_level));
Self { handle, exec_state: Default::default(), global_state }
}
pub fn get_stuck_execution_count(&self) -> usize {
todo!()
/// Get the number of blocked executions encountered by GenMC.
pub fn get_blocked_execution_count(&self) -> u64 {
let mc = self.handle.borrow();
mc.as_ref().unwrap().get_blocked_execution_count()
}
pub fn print_genmc_graph(&self) {
todo!()
/// Get the number of explored executions encountered by GenMC.
pub fn get_explored_execution_count(&self) -> u64 {
let mc = self.handle.borrow();
mc.as_ref().unwrap().get_explored_execution_count()
}
/// Check if GenMC encountered an error that wasn't immediately returned during execution.
/// Returns a string representation of the error if one occurred.
pub fn try_get_error(&self) -> Option<String> {
let mc = self.handle.borrow();
mc.as_ref()
.unwrap()
.get_error_string()
.as_ref()
.map(|error| error.to_string_lossy().to_string())
}
/// Get the message GenMC produced summarizing the overall result of the run
/// (there is always such a message).
pub fn get_result_message(&self) -> String {
let mc = self.handle.borrow();
mc.as_ref()
.unwrap()
.get_result_message()
.as_ref()
.map(|error| error.to_string_lossy().to_string())
.expect("there should always be a message")
}
/// This function determines if we should continue exploring executions or if we are done.
///
/// In GenMC mode, the input program should be repeatedly executed until this function returns `true` or an error is found.
pub fn is_exploration_done(&self) -> bool {
todo!()
let mut mc = self.handle.borrow_mut();
mc.as_mut().unwrap().is_exploration_done()
}
/// Inform GenMC that a new program execution has started.
/// This function should be called at the start of every execution.
pub(crate) fn handle_execution_start(&self) {
todo!()
}
/// Inform GenMC that the program's execution has ended.
///
/// This function must be called even when the execution got stuck (i.e., it returned a `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcStuckExecution`).
pub(crate) fn handle_execution_end<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
) -> Result<(), String> {
todo!()
}
/**** Memory access handling ****/
/// Select whether data race free actions should be allowed. This function should be used carefully!
///
/// If `true` is passed, allow for data races to happen without triggering an error, until this function is called again with argument `false`.
@ -83,10 +176,59 @@ impl GenmcCtx {
/// # Panics
/// If data race free is attempted to be set more than once (i.e., no nesting allowed).
pub(super) fn set_ongoing_action_data_race_free(&self, enable: bool) {
let old = self.allow_data_races.replace(enable);
debug!("GenMC: set_ongoing_action_data_race_free ({enable})");
let old = self.exec_state.allow_data_races.replace(enable);
assert_ne!(old, enable, "cannot nest allow_data_races");
}
/// Check whether data races are currently allowed (e.g., for loading values for validation which are not actually loaded by the program).
fn get_alloc_data_races(&self) -> bool {
self.exec_state.allow_data_races.get()
}
}
/// GenMC event handling. These methods are used to inform GenMC about events happening in the program, and to handle scheduling decisions.
impl GenmcCtx {
/// Prepare for the next execution and inform GenMC about it.
/// Must be called at the start of every execution.
fn prepare_next_execution(&self) {
// Reset per-execution state.
self.exec_state.reset();
// Inform GenMC about the new execution.
let mut mc = self.handle.borrow_mut();
mc.as_mut().unwrap().handle_execution_start();
}
/// Inform GenMC that the program's execution has ended.
///
/// This function must be called even when the execution is blocked
/// (i.e., it returned a `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcBlockedExecution`).
/// Don't call this function if an error was found.
///
/// GenMC detects certain errors only when the execution ends.
/// If an error occurred, a string containing a short error description is returned.
///
/// GenMC currently doesn't return an error in all cases immediately when one happens.
/// This function will also check for those, and return their error description.
///
/// To get all the messages (warnings, errors) that GenMC produces, use the `get_result_message` method.
fn handle_execution_end(&self) -> Option<String> {
let mut mc = self.handle.borrow_mut();
let result = mc.as_mut().unwrap().handle_execution_end();
result.as_ref().map(|msg| msg.to_string_lossy().to_string())?;
// GenMC currently does not return an error value immediately in all cases.
// We manually query for any errors here to ensure we don't miss any.
drop(mc); // `try_get_error` needs access to the `RefCell`.
self.try_get_error()
}
/**** Memory access handling ****/
/// Inform GenMC about an atomic load.
/// Returns the value that the load should read.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
pub(crate) fn atomic_load<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
@ -95,89 +237,143 @@ impl GenmcCtx {
ordering: AtomicReadOrd,
old_val: Option<Scalar>,
) -> InterpResult<'tcx, Scalar> {
assert!(!self.allow_data_races.get());
todo!()
assert!(!self.get_alloc_data_races(), "atomic load with data race checking disabled.");
let genmc_old_value = if let Some(scalar) = old_val {
scalar_to_genmc_scalar(ecx, scalar)?
} else {
GenmcScalar::UNINIT
};
let read_value =
self.handle_load(&ecx.machine, address, size, ordering.to_genmc(), genmc_old_value)?;
genmc_scalar_to_scalar(ecx, read_value, size)
}
/// Inform GenMC about an atomic store.
/// Returns `true` if the stored value should be reflected in Miri's memory.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
pub(crate) fn atomic_store<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
address: Size,
size: Size,
value: Scalar,
old_value: Option<Scalar>,
ordering: AtomicWriteOrd,
) -> InterpResult<'tcx, ()> {
assert!(!self.allow_data_races.get());
todo!()
) -> InterpResult<'tcx, bool> {
assert!(!self.get_alloc_data_races(), "atomic store with data race checking disabled.");
let genmc_value = scalar_to_genmc_scalar(ecx, value)?;
let genmc_old_value = if let Some(scalar) = old_value {
scalar_to_genmc_scalar(ecx, scalar)?
} else {
GenmcScalar::UNINIT
};
self.handle_store(
&ecx.machine,
address,
size,
genmc_value,
genmc_old_value,
ordering.to_genmc(),
)
}
/// Inform GenMC about an atomic fence.
pub(crate) fn atomic_fence<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
ordering: AtomicFenceOrd,
) -> InterpResult<'tcx, ()> {
assert!(!self.allow_data_races.get());
todo!()
_machine: &MiriMachine<'tcx>,
_ordering: AtomicFenceOrd,
) -> InterpResult<'tcx> {
assert!(!self.get_alloc_data_races(), "atomic fence with data race checking disabled.");
throw_unsup_format!("FIXME(genmc): Add support for atomic fences.")
}
/// Inform GenMC about an atomic read-modify-write operation.
///
/// Returns `(old_val, new_val)`.
/// Returns `(old_val, Option<new_val>)`. `new_val` might not be the latest write in coherence order, which is indicated by `None`.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
pub(crate) fn atomic_rmw_op<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
address: Size,
size: Size,
ordering: AtomicRwOrd,
(rmw_op, not): (mir::BinOp, bool),
rhs_scalar: Scalar,
) -> InterpResult<'tcx, (Scalar, Scalar)> {
assert!(!self.allow_data_races.get());
todo!()
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
_address: Size,
_size: Size,
_ordering: AtomicRwOrd,
(_rmw_op, _not): (mir::BinOp, bool),
_rhs_scalar: Scalar,
_old_value: Scalar,
) -> InterpResult<'tcx, (Scalar, Option<Scalar>)> {
assert!(
!self.get_alloc_data_races(),
"atomic read-modify-write operation with data race checking disabled."
);
throw_unsup_format!("FIXME(genmc): Add support for atomic RMW.")
}
/// Inform GenMC about an atomic `min` or `max` operation.
///
/// Returns `(old_val, new_val)`.
/// Returns `(old_val, Option<new_val>)`. `new_val` might not be the latest write in coherence order, which is indicated by `None`.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
pub(crate) fn atomic_min_max_op<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
address: Size,
size: Size,
ordering: AtomicRwOrd,
min: bool,
is_signed: bool,
rhs_scalar: Scalar,
) -> InterpResult<'tcx, (Scalar, Scalar)> {
assert!(!self.allow_data_races.get());
todo!()
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
_address: Size,
_size: Size,
_ordering: AtomicRwOrd,
_min: bool,
_is_signed: bool,
_rhs_scalar: Scalar,
_old_value: Scalar,
) -> InterpResult<'tcx, (Scalar, Option<Scalar>)> {
assert!(
!self.get_alloc_data_races(),
"atomic min/max operation with data race checking disabled."
);
throw_unsup_format!("FIXME(genmc): Add support for atomic min/max.")
}
/// Returns `(old_val, Option<new_val>)`. `new_val` might not be the latest write in coherence order, which is indicated by `None`.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
pub(crate) fn atomic_exchange<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
address: Size,
size: Size,
rhs_scalar: Scalar,
ordering: AtomicRwOrd,
) -> InterpResult<'tcx, (Scalar, bool)> {
assert!(!self.allow_data_races.get());
todo!()
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
_address: Size,
_size: Size,
_rhs_scalar: Scalar,
_ordering: AtomicRwOrd,
_old_value: Scalar,
) -> InterpResult<'tcx, (Scalar, Option<Scalar>)> {
assert!(
!self.get_alloc_data_races(),
"atomic swap operation with data race checking disabled."
);
throw_unsup_format!("FIXME(genmc): Add support for atomic swap.")
}
/// Inform GenMC about an atomic compare-exchange operation.
///
/// Returns the old value read by the compare exchange, optionally the value that Miri should write back to its memory, and whether the compare-exchange was a success or not.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
pub(crate) fn atomic_compare_exchange<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
address: Size,
size: Size,
expected_old_value: Scalar,
new_value: Scalar,
success: AtomicRwOrd,
fail: AtomicReadOrd,
can_fail_spuriously: bool,
) -> InterpResult<'tcx, (Scalar, bool)> {
assert!(!self.allow_data_races.get());
todo!()
_ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
_address: Size,
_size: Size,
_expected_old_value: Scalar,
_new_value: Scalar,
_success: AtomicRwOrd,
_fail: AtomicReadOrd,
_can_fail_spuriously: bool,
_old_value: Scalar,
) -> InterpResult<'tcx, (Scalar, Option<Scalar>, bool)> {
assert!(
!self.get_alloc_data_races(),
"atomic compare-exchange with data race checking disabled."
);
throw_unsup_format!("FIXME(genmc): Add support for atomic compare_exchange.")
}
/// Inform GenMC about a non-atomic memory load
// NOTE(review): diff-hunk marker — the elided context hides the
// `pub(crate) fn memory_load<'tcx>(` line and the `&self` parameter.
@ -188,40 +384,178 @@ impl GenmcCtx {
machine: &MiriMachine<'tcx>,
address: Size,
size: Size,
// NOTE(review): the next two lines are the pre-merge signature/body (removed);
// the current return type and implementation follow them.
) -> InterpResult<'tcx, ()> {
todo!()
) -> InterpResult<'tcx> {
debug!(
"GenMC: received memory_load (non-atomic): address: {:#x}, size: {}",
address.bytes(),
size.bytes()
);
// With data-race checking disabled, non-atomic accesses are not reported to GenMC at all.
if self.get_alloc_data_races() {
debug!("GenMC: data race checking disabled, ignoring non-atomic load.");
return interp_ok(());
}
// GenMC doesn't like ZSTs, and they can't have any data races, so we skip them
if size.bytes() == 0 {
return interp_ok(());
}
let handle_load = |address, size| {
// NOTE: Values loaded non-atomically are still handled by Miri, so we discard whatever we get from GenMC
let _read_value = self.handle_load(
machine,
address,
size,
MemOrdering::NotAtomic,
// This value is used to update the co-maximal store event to the same location.
// We don't need to update that store, since if it is ever read by any atomic loads, the value will be updated then.
// We use uninit for lack of a better value, since we don't know whether the location we currently load from is initialized or not.
GenmcScalar::UNINIT,
)?;
interp_ok(())
};
// This load is small enough so GenMC can handle it.
if size.bytes() <= MAX_ACCESS_SIZE {
return handle_load(address, size);
}
// This load is too big to be a single GenMC access, we have to split it.
// FIXME(genmc): This will misbehave if there are non-64bit-atomics in there.
// Needs proper support on the GenMC side for large and mixed atomic accesses.
for (address, size) in split_access(address, size) {
handle_load(Size::from_bytes(address), Size::from_bytes(size))?;
}
interp_ok(())
}
/// Inform GenMC about a non-atomic memory store
///
/// NOTE: Unlike for *atomic* stores, we don't provide the actual stored values to GenMC here.
pub(crate) fn memory_store<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
address: Size,
size: Size,
) -> InterpResult<'tcx, ()> {
todo!()
) -> InterpResult<'tcx> {
debug!(
"GenMC: received memory_store (non-atomic): address: {:#x}, size: {}",
address.bytes(),
size.bytes()
);
if self.get_alloc_data_races() {
debug!("GenMC: data race checking disabled, ignoring non-atomic store.");
return interp_ok(());
}
// GenMC doesn't like ZSTs, and they can't have any data races, so we skip them
if size.bytes() == 0 {
return interp_ok(());
}
let handle_store = |address, size| {
// We always write the the stored values to Miri's memory, whether GenMC says the write is co-maximal or not.
// The GenMC scheduler ensures that replaying an execution happens in porf-respecting order (po := program order, rf: reads-from order).
// This means that for any non-atomic read Miri performs, the corresponding write has already been replayed.
let _is_co_max_write = self.handle_store(
machine,
address,
size,
// We don't know the value that this store will write, but GenMC expects that we give it an actual value.
// Unfortunately, there are situations where this value can actually become visible
// to the program: when there is an atomic load reading from a non-atomic store.
// FIXME(genmc): update once mixed atomic-non-atomic support is added. Afterwards, this value should never be readable.
GenmcScalar::from_u64(0xDEADBEEF),
// This value is used to update the co-maximal store event to the same location.
// This old value cannot be read anymore by any future loads, since we are doing another non-atomic store to the same location.
// Any future load will either see the store we are adding now, or we have a data race (there can only be one possible non-atomic value to read from at any time).
// We use uninit for lack of a better value, since we don't know whether the location we currently write to is initialized or not.
GenmcScalar::UNINIT,
MemOrdering::NotAtomic,
)?;
interp_ok(())
};
// This store is small enough so GenMC can handle it.
if size.bytes() <= MAX_ACCESS_SIZE {
return handle_store(address, size);
}
// This store is too big to be a single GenMC access, we have to split it.
// FIXME(genmc): This will misbehave if there are non-64bit-atomics in there.
// Needs proper support on the GenMC side for large and mixed atomic accesses.
for (address, size) in split_access(address, size) {
handle_store(Size::from_bytes(address), Size::from_bytes(size))?;
}
interp_ok(())
}
/**** Memory (de)allocation ****/
/// Inform GenMC about a new memory allocation.
/// This is also responsible for determining the address of the new allocation.
pub(crate) fn handle_alloc<'tcx>(
    &self,
    ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
    alloc_id: AllocId,
    size: Size,
    alignment: Align,
    memory_kind: MemoryKind,
) -> InterpResult<'tcx, u64> {
    assert!(
        !self.get_alloc_data_races(),
        "memory allocation with data race checking disabled."
    );
    let machine = &ecx.machine;
    // Global allocations get their addresses from the shared per-run state,
    // not from GenMC's per-execution allocator.
    if memory_kind == MiriMemoryKind::Global.into() {
        return ecx
            .get_global_allocation_address(&self.global_state.global_allocations, alloc_id);
    }
    let thread_infos = self.exec_state.thread_id_manager.borrow();
    let curr_thread = machine.threads.active_thread();
    let genmc_tid = thread_infos.get_genmc_tid(curr_thread);
    // GenMC doesn't support ZSTs, so we set the minimum size to 1 byte
    let genmc_size = size.bytes().max(1);
    let mut mc = self.handle.borrow_mut();
    let pinned_mc = mc.as_mut().unwrap();
    let chosen_address = pinned_mc.handle_malloc(genmc_tid, genmc_size, alignment.bytes());
    // Non-global addresses should not be in the global address space or null.
    assert_ne!(0, chosen_address, "GenMC malloc returned nullptr.");
    assert_eq!(0, chosen_address & GENMC_GLOBAL_ADDRESSES_MASK);
    // Sanity check the address alignment:
    assert!(
        chosen_address.is_multiple_of(alignment.bytes()),
        "GenMC returned address {chosen_address:#x} with lower alignment than requested ({}).",
        alignment.bytes()
    );
    interp_ok(chosen_address)
}
pub(crate) fn handle_dealloc<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
alloc_id: AllocId,
address: Size,
size: Size,
align: Align,
kind: MemoryKind,
) -> InterpResult<'tcx, ()> {
todo!()
) -> InterpResult<'tcx> {
assert_ne!(
kind,
MiriMemoryKind::Global.into(),
"we probably shouldn't try to deallocate global allocations (alloc_id: {alloc_id:?})"
);
assert!(
!self.get_alloc_data_races(),
"memory deallocation with data race checking disabled."
);
let thread_infos = self.exec_state.thread_id_manager.borrow();
let curr_thread = machine.threads.active_thread();
let genmc_tid = thread_infos.get_genmc_tid(curr_thread);
let mut mc = self.handle.borrow_mut();
let pinned_mc = mc.as_mut().unwrap();
pinned_mc.handle_free(genmc_tid, address.bytes());
interp_ok(())
}
/**** Thread management ****/
@ -229,50 +563,95 @@ impl GenmcCtx {
pub(crate) fn handle_thread_create<'tcx>(
&self,
threads: &ThreadManager<'tcx>,
// FIXME(genmc,symmetry reduction): pass info to GenMC
_start_routine: crate::Pointer,
_func_arg: &crate::ImmTy<'tcx>,
new_thread_id: ThreadId,
) -> InterpResult<'tcx, ()> {
assert!(!self.allow_data_races.get());
todo!()
) -> InterpResult<'tcx> {
assert!(!self.get_alloc_data_races(), "thread creation with data race checking disabled.");
let mut thread_infos = self.exec_state.thread_id_manager.borrow_mut();
let curr_thread_id = threads.active_thread();
let genmc_parent_tid = thread_infos.get_genmc_tid(curr_thread_id);
let genmc_new_tid = thread_infos.add_thread(new_thread_id);
let mut mc = self.handle.borrow_mut();
mc.as_mut().unwrap().handle_thread_create(genmc_new_tid, genmc_parent_tid);
interp_ok(())
}
pub(crate) fn handle_thread_join<'tcx>(
&self,
active_thread_id: ThreadId,
child_thread_id: ThreadId,
) -> InterpResult<'tcx, ()> {
assert!(!self.allow_data_races.get());
todo!()
) -> InterpResult<'tcx> {
assert!(!self.get_alloc_data_races(), "thread join with data race checking disabled.");
let thread_infos = self.exec_state.thread_id_manager.borrow();
let genmc_curr_tid = thread_infos.get_genmc_tid(active_thread_id);
let genmc_child_tid = thread_infos.get_genmc_tid(child_thread_id);
let mut mc = self.handle.borrow_mut();
mc.as_mut().unwrap().handle_thread_join(genmc_curr_tid, genmc_child_tid);
interp_ok(())
}
pub(crate) fn handle_thread_stack_empty(&self, thread_id: ThreadId) {
todo!()
pub(crate) fn handle_thread_finish<'tcx>(&self, threads: &ThreadManager<'tcx>) {
assert!(!self.get_alloc_data_races(), "thread finish with data race checking disabled.");
let curr_thread_id = threads.active_thread();
let thread_infos = self.exec_state.thread_id_manager.borrow();
let genmc_tid = thread_infos.get_genmc_tid(curr_thread_id);
debug!("GenMC: thread {curr_thread_id:?} ({genmc_tid:?}) finished.");
let mut mc = self.handle.borrow_mut();
// NOTE: Miri doesn't support return values for threads, but GenMC expects one, so we return 0
mc.as_mut().unwrap().handle_thread_finish(genmc_tid, /* ret_val */ 0);
}
pub(crate) fn handle_thread_finish<'tcx>(
/// Handle a call to `libc::exit` or the exit of the main thread.
/// Unless an error is returned, the program should continue executing (in a different thread, chosen by the next scheduling call).
pub(crate) fn handle_exit<'tcx>(
&self,
threads: &ThreadManager<'tcx>,
) -> InterpResult<'tcx, ()> {
assert!(!self.allow_data_races.get());
todo!()
}
thread: ThreadId,
exit_code: i32,
exit_type: ExitType,
) -> InterpResult<'tcx> {
// Calling `libc::exit` doesn't do cleanup, so we skip the leak check in that case.
let exit_status = ExitStatus { exit_code, exit_type };
/**** Scheduling functionality ****/
if let Some(old_exit_status) = self.exec_state.exit_status.get() {
throw_ub_format!(
"`exit` called twice, first with status {old_exit_status:?}, now with status {exit_status:?}",
);
}
/// Ask for a scheduling decision. This should be called before every MIR instruction.
///
/// GenMC may realize that the execution got stuck, then this function will return a `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcStuckExecution`).
///
/// This is **not** an error by iself! Treat this as if the program ended normally: `handle_execution_end` should be called next, which will determine if were are any actual errors.
pub(crate) fn schedule_thread<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
) -> InterpResult<'tcx, ThreadId> {
assert!(!self.allow_data_races.get());
todo!()
// FIXME(genmc): Add a flag to continue exploration even when the program exits with a non-zero exit code.
if exit_code != 0 {
info!("GenMC: 'exit' called with non-zero argument, aborting execution.");
let leak_check = exit_status.do_leak_check();
throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check });
}
if matches!(exit_type, ExitType::ExitCalled) {
let thread_infos = self.exec_state.thread_id_manager.borrow();
let genmc_tid = thread_infos.get_genmc_tid(thread);
let mut mc = self.handle.borrow_mut();
mc.as_mut().unwrap().handle_thread_kill(genmc_tid);
} else {
assert_eq!(thread, ThreadId::MAIN_THREAD);
}
// We continue executing now, so we store the exit status.
self.exec_state.exit_status.set(Some(exit_status));
interp_ok(())
}
/**** Blocking instructions ****/
#[allow(unused)]
pub(crate) fn handle_verifier_assume<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
@ -282,14 +661,119 @@ impl GenmcCtx {
}
}
impl GenmcCtx {
/// Inform GenMC about a load (atomic or non-atomic).
/// Returns the value that GenMC wants this load to read.
///
/// `genmc_old_value` is used by GenMC to update the co-maximal store event
/// for this location (see the callers for why `UNINIT` may be passed).
fn handle_load<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
address: Size,
size: Size,
memory_ordering: MemOrdering,
genmc_old_value: GenmcScalar,
) -> InterpResult<'tcx, GenmcScalar> {
// Callers filter out ZSTs; atomic accesses must be power-of-two sized.
assert!(
size.bytes() != 0
&& (memory_ordering == MemOrdering::NotAtomic || size.bytes().is_power_of_two())
);
if size.bytes() > MAX_ACCESS_SIZE {
throw_unsup_format!(
"GenMC mode currently does not support atomics larger than {MAX_ACCESS_SIZE} bytes.",
);
}
// Translate Miri's active thread into GenMC's thread id.
let thread_infos = self.exec_state.thread_id_manager.borrow();
let curr_thread_id = machine.threads.active_thread();
let genmc_tid = thread_infos.get_genmc_tid(curr_thread_id);
debug!(
"GenMC: load, thread: {curr_thread_id:?} ({genmc_tid:?}), address: {addr} == {addr:#x}, size: {size:?}, ordering: {memory_ordering:?}, old_value: {genmc_old_value:x?}",
addr = address.bytes()
);
let mut mc = self.handle.borrow_mut();
let pinned_mc = mc.as_mut().unwrap();
let load_result = pinned_mc.handle_load(
genmc_tid,
address.bytes(),
size.bytes(),
memory_ordering,
genmc_old_value,
);
if let Some(error) = load_result.error.as_ref() {
// FIXME(genmc): error handling
throw_ub_format!("{}", error.to_string_lossy());
}
if !load_result.has_value {
// FIXME(GenMC): Implementing certain GenMC optimizations will lead to this.
unimplemented!("GenMC: load returned no value.");
}
debug!("GenMC: load returned value: {:?}", load_result.read_value);
interp_ok(load_result.read_value)
}
/// Inform GenMC about a store (atomic or non-atomic).
/// Returns true if the store is co-maximal, i.e., it should be written to Miri's memory too.
///
/// `genmc_old_value` is used by GenMC to update the co-maximal store event
/// for this location (see the callers for why `UNINIT` may be passed).
fn handle_store<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
address: Size,
size: Size,
genmc_value: GenmcScalar,
genmc_old_value: GenmcScalar,
memory_ordering: MemOrdering,
) -> InterpResult<'tcx, bool> {
// Callers filter out ZSTs; atomic accesses must be power-of-two sized.
assert!(
size.bytes() != 0
&& (memory_ordering == MemOrdering::NotAtomic || size.bytes().is_power_of_two())
);
if size.bytes() > MAX_ACCESS_SIZE {
throw_unsup_format!(
"GenMC mode currently does not support atomics larger than {MAX_ACCESS_SIZE} bytes."
);
}
// Translate Miri's active thread into GenMC's thread id.
let thread_infos = self.exec_state.thread_id_manager.borrow();
let curr_thread_id = machine.threads.active_thread();
let genmc_tid = thread_infos.get_genmc_tid(curr_thread_id);
// NOTE(review): the load counterpart logs "{addr} == {addr:#x}" while this logs
// "{addr} = {addr:#x}" — harmless, but consider making the two consistent.
debug!(
"GenMC: store, thread: {curr_thread_id:?} ({genmc_tid:?}), address: {addr} = {addr:#x}, size: {size:?}, ordering {memory_ordering:?}, value: {genmc_value:?}",
addr = address.bytes()
);
let mut mc = self.handle.borrow_mut();
let pinned_mc = mc.as_mut().unwrap();
let store_result = pinned_mc.handle_store(
genmc_tid,
address.bytes(),
size.bytes(),
genmc_value,
genmc_old_value,
memory_ordering,
);
if let Some(error) = store_result.error.as_ref() {
// FIXME(genmc): error handling
throw_ub_format!("{}", error.to_string_lossy());
}
interp_ok(store_result.is_coherence_order_maximal_write)
}
/**** Blocking functionality ****/
/// Handle a user thread getting blocked.
/// This may happen due to a manual `assume` statement added by a user
/// or added by some automated program transformation, e.g., for spinloops.
// NOTE(review): not implemented yet — reaching this in GenMC mode panics via `todo!()`.
fn handle_user_block<'tcx>(&self, _machine: &MiriMachine<'tcx>) -> InterpResult<'tcx> {
todo!()
}
}
impl VisitProvenance for GenmcCtx {
/// Nothing to do: `GenmcCtx` holds no provenance-carrying pointers ("tags").
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
// We don't have any tags.
}
}
// NOTE(review): this `impl` block is the *pre-merge* version of `handle_user_block`
// (note the non-underscored `machine` parameter and the `InterpResult<'tcx, ()>`
// return type); the current version lives in the `impl GenmcCtx` block above.
// Keeping both would not compile (duplicate definition) — this looks like
// diff-rendering residue of removed lines rather than real source.
impl GenmcCtx {
fn handle_user_block<'tcx>(&self, machine: &MiriMachine<'tcx>) -> InterpResult<'tcx, ()> {
todo!()
}
}

View file

@ -0,0 +1,71 @@
use std::rc::Rc;
use std::sync::Arc;
use rustc_middle::ty::TyCtxt;
use super::GlobalState;
use crate::rustc_const_eval::interpret::PointerArithmetic;
use crate::{GenmcCtx, MiriConfig};
/// Do a complete run of the program in GenMC mode.
/// This will call `eval_entry` multiple times, until either:
/// - An error is detected (indicated by a `None` return value)
/// - All possible executions are explored.
///
/// FIXME(genmc): add estimation mode setting.
pub fn run_genmc_mode<'tcx>(
    config: &MiriConfig,
    eval_entry: impl Fn(Rc<GenmcCtx>) -> Option<i32>,
    tcx: TyCtxt<'tcx>,
) -> Option<i32> {
    // There exists only one `global_state` per full run in GenMC mode.
    // It is shared by all `GenmcCtx` in this run.
    // FIXME(genmc): implement multithreading once GenMC supports it.
    let global_state = Arc::new(GlobalState::new(tcx.target_usize_max()));
    let genmc_ctx = Rc::new(GenmcCtx::new(config, global_state));

    // `execution_idx` only reports progress; Miri will panic on wrap-around.
    for execution_idx in 0u64.. {
        tracing::info!("Miri-GenMC loop {}", execution_idx + 1);
        // Reset per-execution state and tell GenMC that a new execution starts.
        genmc_ctx.prepare_next_execution();
        // Run the program to completion to get its return value; bail out on error.
        // FIXME(genmc): add an option to allow the user to see the GenMC output message when the verification is done.
        let return_code = eval_entry(genmc_ctx.clone())?;
        // Tell GenMC that the execution is complete; if it reports an error, print it.
        if let Some(error) = genmc_ctx.handle_execution_end() {
            // This can be reached for errors that affect the entire execution, not just a specific event.
            // For instance, linearizability checking and liveness checking report their errors this way.
            // Neither are supported by Miri-GenMC at the moment though. However, GenMC also
            // treats races on deallocation as global errors, so this code path is still reachable.
            // Since we don't have any span information for the error at this point,
            // we just print GenMC's error message.
            eprintln!("(GenMC) Error detected: {error}");
            eprintln!();
            eprintln!("{}", genmc_ctx.get_result_message());
            return None;
        }
        // Once every possible execution has been explored, report statistics and stop.
        if genmc_ctx.is_exploration_done() {
            eprintln!("(GenMC) Verification complete. No errors were detected.");
            let explored_execution_count = genmc_ctx.get_explored_execution_count();
            let blocked_execution_count = genmc_ctx.get_blocked_execution_count();
            eprintln!("Number of complete executions explored: {explored_execution_count}");
            if blocked_execution_count > 0 {
                eprintln!("Number of blocked executions seen: {blocked_execution_count}");
            }
            // Return the return code of the last execution.
            return Some(return_code);
        }
    }
    unreachable!()
}

View file

@ -0,0 +1,69 @@
use genmc_sys::{ActionKind, ExecutionState};
use super::GenmcCtx;
use crate::{
InterpCx, InterpResult, MiriMachine, TerminationInfo, ThreadId, interp_ok, throw_machine_stop,
};
impl GenmcCtx {
/// Ask GenMC which thread should execute next.
///
/// Passes GenMC a hint (`ActionKind`) about whether the current thread's next
/// step may be a load, used for GenMC's "writes-first" scheduling (see below).
/// May also report that the execution is blocked or finished.
pub(crate) fn schedule_thread<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
) -> InterpResult<'tcx, ThreadId> {
let thread_manager = &ecx.machine.threads;
let active_thread_id = thread_manager.active_thread();
// Determine whether the next instruction in the current thread might be a load.
// This is used for the "writes-first" scheduling in GenMC.
// Scheduling writes before reads can be beneficial for verification performance.
// `Load` is a safe default for the next instruction type if we cannot guarantee that it isn't a load.
let curr_thread_next_instr_kind = if !thread_manager.active_thread_ref().is_enabled() {
// The current thread can get blocked (e.g., due to a thread join, `Mutex::lock`, assume statement, ...), then we need to ask GenMC for another thread to schedule.
// Most to all blocking operations have load semantics, since they wait on something to change in another thread,
// e.g., a thread join waiting on another thread to finish (join loads the return value(s) of the other thread),
// or a thread waiting for another thread to unlock a `Mutex`, which loads the mutex state (Locked, Unlocked).
ActionKind::Load
} else {
// This thread is still enabled. If it executes a terminator next, we consider yielding,
// but in all other cases we just keep running this thread since it never makes sense
// to yield before a non-atomic operation.
let Some(frame) = thread_manager.active_thread_stack().last() else {
return interp_ok(active_thread_id);
};
let either::Either::Left(loc) = frame.current_loc() else {
// We are unwinding, so the next step is definitely not atomic.
return interp_ok(active_thread_id);
};
let basic_block = &frame.body().basic_blocks[loc.block];
if let Some(_statement) = basic_block.statements.get(loc.statement_index) {
// Statements can't be atomic.
return interp_ok(active_thread_id);
}
// FIXME(genmc): determine terminator kind.
ActionKind::Load
};
// Translate the active thread's id and ask GenMC for a scheduling decision.
let thread_infos = self.exec_state.thread_id_manager.borrow();
let genmc_tid = thread_infos.get_genmc_tid(active_thread_id);
let mut mc = self.handle.borrow_mut();
let pinned_mc = mc.as_mut().unwrap();
let result = pinned_mc.schedule_next(genmc_tid, curr_thread_next_instr_kind);
// Depending on the exec_state, we either schedule the given thread, or we are finished with this execution.
match result.exec_state {
ExecutionState::Ok => interp_ok(thread_infos.get_miri_tid(result.next_thread)),
ExecutionState::Blocked => throw_machine_stop!(TerminationInfo::GenmcBlockedExecution),
ExecutionState::Finished => {
// The exit status was stored by `handle_exit` before GenMC saw the program end.
let exit_status = self.exec_state.exit_status.get().expect(
"If the execution is finished, we should have a return value from the program.",
);
throw_machine_stop!(TerminationInfo::Exit {
code: exit_status.exit_code,
leak_check: exit_status.do_leak_check()
});
}
_ => unreachable!(),
}
}
}

View file

@ -0,0 +1,63 @@
use genmc_sys::GENMC_MAIN_THREAD_ID;
use rustc_data_structures::fx::FxHashMap;
use crate::ThreadId;
/// Bidirectional mapping between Miri's `ThreadId`s and the `i32` thread ids used by GenMC.
#[derive(Debug)]
pub struct ThreadIdMap {
/// Map from Miri thread IDs to GenMC thread IDs.
/// We assume as little as possible about Miri thread IDs, so we use a map.
miri_to_genmc: FxHashMap<ThreadId, i32>,
/// Map from GenMC thread IDs to Miri thread IDs.
/// We control which thread IDs are used, so we choose them as an incrementing counter,
/// which lets a plain `Vec` serve as the reverse map (the GenMC id is the index).
genmc_to_miri: Vec<ThreadId>, // FIXME(genmc): check if this assumption is (and will stay) correct.
}
impl Default for ThreadIdMap {
fn default() -> Self {
let miri_to_genmc = [(ThreadId::MAIN_THREAD, GENMC_MAIN_THREAD_ID)].into_iter().collect();
let genmc_to_miri = vec![ThreadId::MAIN_THREAD];
Self { miri_to_genmc, genmc_to_miri }
}
}
impl ThreadIdMap {
    /// Restore the mapping to its initial state (only the main thread known),
    /// reusing the existing allocations for the next execution.
    pub fn reset(&mut self) {
        self.genmc_to_miri.clear();
        self.genmc_to_miri.push(ThreadId::MAIN_THREAD);
        self.miri_to_genmc.clear();
        self.miri_to_genmc.insert(ThreadId::MAIN_THREAD, GENMC_MAIN_THREAD_ID);
    }

    /// Add a new Miri thread to the mapping and dispense a new thread ID for GenMC to use.
    #[must_use]
    pub fn add_thread(&mut self, thread_id: ThreadId) -> i32 {
        // New GenMC ids are handed out sequentially; the vector length doubles as the counter.
        let genmc_tid: i32 = self.genmc_to_miri.len().try_into().unwrap();
        // An existing entry is simply overwritten: should Miri ever reuse a `ThreadId`,
        // we assume the previous thread with that id no longer exists.
        self.miri_to_genmc.insert(thread_id, genmc_tid);
        self.genmc_to_miri.push(thread_id);
        genmc_tid
    }

    /// Get the GenMC thread ID corresponding to a given Miri `ThreadId`.
    /// Panics if there is no mapping for the given `ThreadId`.
    #[must_use]
    pub fn get_genmc_tid(&self, thread_id: ThreadId) -> i32 {
        self.miri_to_genmc.get(&thread_id).copied().unwrap()
    }

    /// Get the Miri `ThreadId` corresponding to a given GenMC thread id.
    /// Panics if the given thread id isn't valid.
    #[must_use]
    pub fn get_miri_tid(&self, genmc_tid: i32) -> ThreadId {
        let index = usize::try_from(genmc_tid).unwrap();
        let miri_tid = self.genmc_to_miri.get(index).copied();
        miri_tid.expect("A thread id returned from GenMC should exist.")
    }
}

View file

@ -142,7 +142,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// Synchronize with the previous completion of an InitOnce.
/// Must only be called after checking that it is complete.
#[inline]
fn init_once_observe_completed(&mut self, init_once_ref: &InitOnceRef) {
fn init_once_observe_completed(&mut self, init_once_ref: &InitOnceRef) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let init_once = init_once_ref.0.borrow();
@ -152,6 +152,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"observing the completion of incomplete init once"
);
this.acquire_clock(&init_once.clock);
this.acquire_clock(&init_once.clock)
}
}

View file

@ -22,5 +22,5 @@ pub mod weak_memory;
mod genmc;
pub use self::data_race_handler::{AllocDataRaceHandler, GlobalDataRaceHandler};
pub use self::genmc::{GenmcConfig, GenmcCtx};
pub use self::genmc::{ExitType, GenmcConfig, GenmcCtx, run_genmc_mode};
pub use self::vector_clock::VClock;

View file

@ -209,6 +209,11 @@ impl<'tcx> Thread<'tcx> {
self.thread_name.as_deref()
}
/// Return whether this thread is enabled or not.
/// (Delegates to the thread's `state`; the GenMC scheduler asserts that only
/// enabled threads get scheduled.)
pub fn is_enabled(&self) -> bool {
self.state.is_enabled()
}
/// Get the name of the current thread for display purposes; will include thread ID if not set.
fn thread_display_name(&self, id: ThreadId) -> String {
if let Some(ref thread_name) = self.thread_name {
@ -404,6 +409,7 @@ pub struct ThreadManager<'tcx> {
/// A mapping from a thread-local static to the thread specific allocation.
thread_local_allocs: FxHashMap<(DefId, ThreadId), StrictPointer>,
/// A flag that indicates that we should change the active thread.
/// Completely ignored in GenMC mode.
yield_active_thread: bool,
/// A flag that indicates that we should do round robin scheduling of threads else randomized scheduling is used.
fixed_scheduling: bool,
@ -676,13 +682,6 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
#[inline]
fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
let this = self.eval_context_mut();
// Inform GenMC that a thread has finished all user code. GenMC needs to know this for scheduling.
// FIXME(GenMC): Thread-local destructors *are* user code, so this is odd. Also now that we
// support pre-main constructors, it can get called there as well.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
let thread_id = this.active_thread();
genmc_ctx.handle_thread_stack_empty(thread_id);
}
let mut callback = this
.active_thread_mut()
.on_stack_empty
@ -703,19 +702,19 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
/// If GenMC mode is active, the scheduling is instead handled by GenMC.
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
let this = self.eval_context_mut();
// In GenMC mode, we let GenMC do the scheduling
// In GenMC mode, we let GenMC do the scheduling.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
let next_thread_id = genmc_ctx.schedule_thread(this)?;
let thread_manager = &mut this.machine.threads;
thread_manager.active_thread = next_thread_id;
thread_manager.yield_active_thread = false;
assert!(thread_manager.threads[thread_manager.active_thread].state.is_enabled());
return interp_ok(SchedulingAction::ExecuteStep);
}
// We are not in GenMC mode, so we control the schedule
// We are not in GenMC mode, so we control the scheduling.
let thread_manager = &mut this.machine.threads;
let clock = &this.machine.monotonic_clock;
let rng = this.machine.rng.get_mut();
@ -863,7 +862,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
GlobalDataRaceHandler::Vclocks(data_race) =>
data_race.thread_created(&this.machine.threads, new_thread_id, current_span),
GlobalDataRaceHandler::Genmc(genmc_ctx) =>
genmc_ctx.handle_thread_create(&this.machine.threads, new_thread_id)?,
genmc_ctx.handle_thread_create(
&this.machine.threads,
start_routine,
&func_arg,
new_thread_id,
)?,
}
// Write the current thread-id, switch to the next thread later
// to treat this write operation as occurring on the current thread.
@ -916,13 +920,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let thread = this.active_thread_mut();
assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
thread.state = ThreadState::Terminated;
match &mut this.machine.data_race {
GlobalDataRaceHandler::None => {}
GlobalDataRaceHandler::Vclocks(data_race) =>
data_race.thread_terminated(&this.machine.threads),
GlobalDataRaceHandler::Genmc(genmc_ctx) =>
genmc_ctx.handle_thread_finish(&this.machine.threads)?,
}
// Deallocate TLS.
let gone_thread = this.active_thread();
{
@ -953,6 +951,18 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
}
match &mut this.machine.data_race {
GlobalDataRaceHandler::None => {}
GlobalDataRaceHandler::Vclocks(data_race) =>
data_race.thread_terminated(&this.machine.threads),
GlobalDataRaceHandler::Genmc(genmc_ctx) => {
// Inform GenMC that the thread finished.
// This needs to happen once all accesses to the thread are done, including freeing any TLS statics.
genmc_ctx.handle_thread_finish(&this.machine.threads)
}
}
// Unblock joining threads.
let unblock_reason = BlockReason::Join(gone_thread);
let threads = &this.machine.threads.threads;
@ -978,6 +988,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
callback: DynUnblockCallback<'tcx>,
) {
let this = self.eval_context_mut();
if timeout.is_some() && this.machine.data_race.as_genmc_ref().is_some() {
panic!("Unimplemented: Timeouts not yet supported in GenMC mode.");
}
let timeout = timeout.map(|(clock, anchor, duration)| {
let anchor = match clock {
TimeoutClock::RealTime => {
@ -1076,6 +1089,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"{:?} blocked on {:?} when trying to join",
thread_mgr.active_thread, joined_thread_id
);
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
genmc_ctx.handle_thread_join(thread_mgr.active_thread, joined_thread_id)?;
}
// The joined thread is still running, we need to wait for it.
// Once we get unblocked, perform the appropriate synchronization and write the return value.
let dest = return_dest.clone();

View file

@ -31,8 +31,9 @@ pub enum TerminationInfo {
},
Int2PtrWithStrictProvenance,
Deadlock,
/// In GenMC mode, an execution can get stuck in certain cases. This is not an error.
GenmcStuckExecution,
/// In GenMC mode, executions can get blocked, which stops the current execution without running any cleanup.
/// No leak checks should be performed if this happens, since they would give false positives.
GenmcBlockedExecution,
MultipleSymbolDefinitions {
link_name: Symbol,
first: SpanData,
@ -77,7 +78,8 @@ impl fmt::Display for TerminationInfo {
StackedBorrowsUb { msg, .. } => write!(f, "{msg}"),
TreeBorrowsUb { title, .. } => write!(f, "{title}"),
Deadlock => write!(f, "the evaluated program deadlocked"),
GenmcStuckExecution => write!(f, "GenMC determined that the execution got stuck"),
GenmcBlockedExecution =>
write!(f, "GenMC determined that the execution got blocked (this is not an error)"),
MultipleSymbolDefinitions { link_name, .. } =>
write!(f, "multiple definitions of symbol `{link_name}`"),
SymbolShimClashing { link_name, .. } =>
@ -243,11 +245,12 @@ pub fn report_error<'tcx>(
labels.push(format!("this thread got stuck here"));
None
}
GenmcStuckExecution => {
// This case should only happen in GenMC mode. We treat it like a normal program exit.
GenmcBlockedExecution => {
// This case should only happen in GenMC mode.
assert!(ecx.machine.data_race.as_genmc_ref().is_some());
tracing::info!("GenMC: found stuck execution");
return Some((0, true));
// The program got blocked by GenMC without finishing the execution.
// No cleanup code was executed, so we don't do any leak checks.
return Some((0, false));
}
MultipleSymbolDefinitions { .. } | SymbolShimClashing { .. } => None,
};

View file

@ -247,8 +247,21 @@ impl<'tcx> MainThreadState<'tcx> {
// to be like a global `static`, so that all memory reached by it is considered to "not leak".
this.terminate_active_thread(TlsAllocAction::Leak)?;
// Stop interpreter loop.
throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check: true });
// In GenMC mode, we do not immediately stop execution on main thread exit.
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
// If there's no error, execution will continue (on another thread).
genmc_ctx.handle_exit(
ThreadId::MAIN_THREAD,
exit_code,
crate::concurrency::ExitType::MainThreadFinish,
)?;
} else {
// Stop interpreter loop.
throw_machine_stop!(TerminationInfo::Exit {
code: exit_code,
leak_check: true
});
}
}
}
interp_ok(Poll::Pending)
@ -449,10 +462,6 @@ pub fn eval_entry<'tcx>(
// Copy setting before we move `config`.
let ignore_leaks = config.ignore_leaks;
if let Some(genmc_ctx) = &genmc_ctx {
genmc_ctx.handle_execution_start();
}
let mut ecx = match create_ecx(tcx, entry_id, entry_type, config, genmc_ctx).report_err() {
Ok(v) => v,
Err(err) => {
@ -476,15 +485,6 @@ pub fn eval_entry<'tcx>(
// Show diagnostic, if any.
let (return_code, leak_check) = report_error(&ecx, err)?;
// We inform GenMC that the execution is complete.
if let Some(genmc_ctx) = ecx.machine.data_race.as_genmc_ref()
&& let Err(error) = genmc_ctx.handle_execution_end(&ecx)
{
// FIXME(GenMC): Improve error reporting.
tcx.dcx().err(format!("GenMC returned an error: \"{error}\""));
return None;
}
// If we get here there was no fatal error.
// Possibly check for memory leaks.

View file

@ -132,7 +132,7 @@ pub use crate::concurrency::thread::{
BlockReason, DynUnblockCallback, EvalContextExt as _, StackEmptyCallback, ThreadId,
ThreadManager, TimeoutAnchor, TimeoutClock, UnblockKind,
};
pub use crate::concurrency::{GenmcConfig, GenmcCtx};
pub use crate::concurrency::{GenmcConfig, GenmcCtx, run_genmc_mode};
pub use crate::data_structures::dedup_range_map::DedupRangeMap;
pub use crate::data_structures::mono_hash_map::MonoHashMap;
pub use crate::diagnostics::{

View file

@ -747,11 +747,13 @@ impl<'tcx> MiriMachine<'tcx> {
thread_cpu_affinity
.insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
}
let alloc_addresses =
RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr, tcx));
MiriMachine {
tcx,
borrow_tracker,
data_race,
alloc_addresses: RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr)),
alloc_addresses,
// `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
env_vars: EnvVars::default(),
main_fn_ret_place: None,
@ -1504,9 +1506,8 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
}
match &machine.data_race {
GlobalDataRaceHandler::None => {}
GlobalDataRaceHandler::Genmc(genmc_ctx) => {
genmc_ctx.memory_store(machine, ptr.addr(), range.size)?;
}
GlobalDataRaceHandler::Genmc(genmc_ctx) =>
genmc_ctx.memory_store(machine, ptr.addr(), range.size)?,
GlobalDataRaceHandler::Vclocks(_global_state) => {
let _trace = enter_trace_span!(data_race::before_memory_write);
let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
@ -1543,7 +1544,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
match &machine.data_race {
GlobalDataRaceHandler::None => {}
GlobalDataRaceHandler::Genmc(genmc_ctx) =>
genmc_ctx.handle_dealloc(machine, ptr.addr(), size, align, kind)?,
genmc_ctx.handle_dealloc(machine, alloc_id, ptr.addr(), kind)?,
GlobalDataRaceHandler::Vclocks(_global_state) => {
let _trace = enter_trace_span!(data_race::before_memory_deallocation);
let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();

View file

@ -490,13 +490,22 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
"exit" => {
let [code] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let code = this.read_scalar(code)?.to_i32()?;
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
// If there is no error, execution should continue (on a different thread).
genmc_ctx.handle_exit(
this.machine.threads.active_thread(),
code,
crate::concurrency::ExitType::ExitCalled,
)?;
todo!(); // FIXME(genmc): Add a way to return here that is allowed to not do progress (can't use existing EmulateItemResult variants).
}
throw_machine_stop!(TerminationInfo::Exit { code, leak_check: false });
}
"abort" => {
let [] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
throw_machine_stop!(TerminationInfo::Abort(
"the program aborted execution".to_owned()
))
));
}
// Standard C allocation

View file

@ -639,7 +639,7 @@ fn return_ready_list<'tcx>(
&des.1,
)?;
// Synchronize waking thread with the event of interest.
ecx.acquire_clock(&epoll_event_instance.clock);
ecx.acquire_clock(&epoll_event_instance.clock)?;
num_of_events = num_of_events.strict_add(1);
} else {

View file

@ -294,7 +294,7 @@ fn eventfd_read<'tcx>(
);
} else {
// Synchronize with all prior `write` calls to this FD.
ecx.acquire_clock(&eventfd.clock.borrow());
ecx.acquire_clock(&eventfd.clock.borrow())?;
// Return old counter value into user-space buffer.
ecx.write_int(counter, &buf_place)?;

View file

@ -345,7 +345,7 @@ fn anonsocket_read<'tcx>(
// Synchronize with all previous writes to this buffer.
// FIXME: this over-synchronizes; a more precise approach would be to
// only sync with the writes whose data we will read.
ecx.acquire_clock(&readbuf.clock);
ecx.acquire_clock(&readbuf.clock)?;
// Do full read / partial read based on the space available.
// Conveniently, `read` exists on `VecDeque` and has exactly the desired behavior.

View file

@ -60,7 +60,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
true
}
InitOnceStatus::Complete => {
this.init_once_observe_completed(init_once_ref);
this.init_once_observe_completed(init_once_ref)?;
this.write_scalar(this.eval_windows("c", "FALSE"), pending_place)?;
this.write_scalar(this.eval_windows("c", "TRUE"), dest)?;
true

View file

@ -0,0 +1,19 @@
error: Undefined Behavior: Non-atomic race
--> tests/genmc/fail/data_race/weak_orderings.rs:LL:CC
|
LL | X = 2;
| ^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at tests/genmc/fail/data_race/weak_orderings.rs:LL:CC
= note: inside `<std::boxed::Box<{closure@tests/genmc/fail/data_race/weak_orderings.rs:LL:CC}> as std::ops::FnOnce<()>>::call_once` at RUSTLIB/alloc/src/boxed.rs:LL:CC
note: inside `genmc::spawn_pthread_closure::thread_func::<{closure@tests/genmc/fail/data_race/weak_orderings.rs:LL:CC}>`
--> tests/genmc/fail/data_race/../../../utils/genmc.rs:LL:CC
|
LL | f();
| ^^^
error: aborting due to 1 previous error

View file

@ -0,0 +1,19 @@
error: Undefined Behavior: Non-atomic race
--> tests/genmc/fail/data_race/weak_orderings.rs:LL:CC
|
LL | X = 2;
| ^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at tests/genmc/fail/data_race/weak_orderings.rs:LL:CC
= note: inside `<std::boxed::Box<{closure@tests/genmc/fail/data_race/weak_orderings.rs:LL:CC}> as std::ops::FnOnce<()>>::call_once` at RUSTLIB/alloc/src/boxed.rs:LL:CC
note: inside `genmc::spawn_pthread_closure::thread_func::<{closure@tests/genmc/fail/data_race/weak_orderings.rs:LL:CC}>`
--> tests/genmc/fail/data_race/../../../utils/genmc.rs:LL:CC
|
LL | f();
| ^^^
error: aborting due to 1 previous error

View file

@ -0,0 +1,19 @@
error: Undefined Behavior: Non-atomic race
--> tests/genmc/fail/data_race/weak_orderings.rs:LL:CC
|
LL | X = 2;
| ^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at tests/genmc/fail/data_race/weak_orderings.rs:LL:CC
= note: inside `<std::boxed::Box<{closure@tests/genmc/fail/data_race/weak_orderings.rs:LL:CC}> as std::ops::FnOnce<()>>::call_once` at RUSTLIB/alloc/src/boxed.rs:LL:CC
note: inside `genmc::spawn_pthread_closure::thread_func::<{closure@tests/genmc/fail/data_race/weak_orderings.rs:LL:CC}>`
--> tests/genmc/fail/data_race/../../../utils/genmc.rs:LL:CC
|
LL | f();
| ^^^
error: aborting due to 1 previous error

View file

@ -0,0 +1,40 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@revisions: rlx_rlx rlx_acq rel_rlx
// Translated from GenMC's test `wrong/racy/MP+rel+rlx`, `MP+rlx+acq` and `MP+rlx+rlx`.
// Test if Miri with GenMC can detect the data race on `X`.
// Relaxed orderings on an atomic store-load pair should not synchronize the non-atomic write to X, leading to a data race.
// FIXME(genmc): once Miri-GenMC error reporting is improved, ensure that it correctly points to the two spans involved in the data race.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::{self, *};
use genmc::spawn_pthread_closure;
// `X` is written non-atomically by both threads; the race on it is the point of this test.
static mut X: u64 = 0;
// Atomic flag used (unsuccessfully) to publish the non-atomic write to `X`.
static Y: AtomicUsize = AtomicUsize::new(0);
// Per-revision orderings (revisions: `rlx_rlx`, `rlx_acq`, `rel_rlx`): in every revision
// at most one side of the store-load pair is strengthened, so the pair never forms a
// release/acquire synchronization.
const STORE_ORD: Ordering = if cfg!(rel_rlx) { Release } else { Relaxed };
const LOAD_ORD: Ordering = if cfg!(rlx_acq) { Acquire } else { Relaxed };
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    unsafe {
        // Writer thread: non-atomic write to `X`, then raise the flag.
        // NOTE(review): `spawn_pthread_closure` is a local test helper; presumably it runs
        // the closure on a new pthread — confirm in utils/genmc.rs.
        spawn_pthread_closure(|| {
            X = 1;
            Y.store(1, STORE_ORD);
        });
        // Reader thread: seeing the flag set does not synchronize-with the writer
        // (orderings too weak), so this write races with `X = 1` above.
        spawn_pthread_closure(|| {
            if Y.load(LOAD_ORD) != 0 {
                X = 2; //~ ERROR: Undefined Behavior: Non-atomic race
            }
        });
    }
    0
}

View file

@ -0,0 +1,15 @@
error: Undefined Behavior: entering unreachable code
--> tests/genmc/fail/simple/2w2w_weak.rs:LL:CC
|
LL | std::hint::unreachable_unchecked();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `miri_start` at tests/genmc/fail/simple/2w2w_weak.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,15 @@
error: Undefined Behavior: entering unreachable code
--> tests/genmc/fail/simple/2w2w_weak.rs:LL:CC
|
LL | std::hint::unreachable_unchecked();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `miri_start` at tests/genmc/fail/simple/2w2w_weak.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,73 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@revisions: sc3_rel1 release4 relaxed4
// The pass tests "2w2w_3sc_1rel.rs", "2w2w_4rel" and "2w2w_4sc" and the fail test "2w2w_weak.rs" are related.
//
// This test has multiple variants using different memory orderings.
// When using any combination of orderings other than all 4 being `SeqCst`, the memory model allows the program to result in (X, Y) == (1, 1).
// The "pass" variants only check that we get the expected number of executions (3 for all SC, 4 otherwise),
// and a valid outcome every execution, but do not check that we get all allowed results.
// This "fail" variant ensures we can explore the execution resulting in (1, 1), an incorrect unsafe assumption that the result (1, 1) is impossible.
//
// Miri without GenMC is unable to produce this program execution and thus detect the incorrect assumption, even with `-Zmiri-many-seeds`.
//
// To get good coverage, we test the combination with the strongest orderings allowing this result (3 `SeqCst`, 1 `Release`),
// the weakest orderings (4 `Relaxed`), and one in between (4 `Release`).
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::{self, *};
use crate::genmc::{join_pthreads, spawn_pthread_closure};
static X: AtomicU64 = AtomicU64::new(0);
static Y: AtomicU64 = AtomicU64::new(0);
// Per-revision store orderings. The cfg names must match the revision names declared in the
// `//@revisions:` line at the top of this file (`sc3_rel1`, `release4`, `relaxed4`); a cfg
// that matches no revision would silently make every revision use the all-`Relaxed` fallback.
// `STORE_ORD_3` is used for three of the four stores, `STORE_ORD_1` for the remaining one.
// Strongest orderings allowing result (1, 1): 3 * `SeqCst`, 1 * `Release`.
#[cfg(sc3_rel1)]
const STORE_ORD_3: Ordering = SeqCst;
#[cfg(sc3_rel1)]
const STORE_ORD_1: Ordering = Release;
// 4 * `Release`.
#[cfg(release4)]
const STORE_ORD_3: Ordering = Release;
#[cfg(release4)]
const STORE_ORD_1: Ordering = Release;
// Weakest orderings (4 * `Relaxed`), used by the `relaxed4` revision.
#[cfg(not(any(sc3_rel1, release4)))]
const STORE_ORD_3: Ordering = Relaxed;
#[cfg(not(any(sc3_rel1, release4)))]
const STORE_ORD_1: Ordering = Relaxed;
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    unsafe {
        // Classic 2+2W shape: two threads store to `X` and `Y` in opposite orders.
        let ids = [
            spawn_pthread_closure(|| {
                X.store(1, STORE_ORD_3);
                Y.store(2, STORE_ORD_3);
            }),
            spawn_pthread_closure(|| {
                Y.store(1, STORE_ORD_1);
                X.store(2, STORE_ORD_3);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // We mark the result (1, 1) as unreachable, which is incorrect.
        // Per the file-top comment, (1, 1) is forbidden only when all four stores are
        // `SeqCst`; the weaker combinations tested here allow it, so GenMC must find it.
        let result = (X.load(Relaxed), Y.load(Relaxed));
        if result == (1, 1) {
            // FIXME(genmc): Use `std::process::abort()` once backtraces for that are improved (https://github.com/rust-lang/rust/pull/146118)
            std::hint::unreachable_unchecked(); //~ ERROR: entering unreachable code
        }
        0
    }
}

View file

@ -0,0 +1,15 @@
error: Undefined Behavior: entering unreachable code
--> tests/genmc/fail/simple/2w2w_weak.rs:LL:CC
|
LL | std::hint::unreachable_unchecked();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `miri_start` at tests/genmc/fail/simple/2w2w_weak.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -0,0 +1,51 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "2CoWR".
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
// Two atomic locations; two writer and two reader threads below (GenMC test "2CoWR").
static X: AtomicU64 = AtomicU64::new(0);
static Y: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove these initializing writes once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    Y.store(0, Relaxed);
    unsafe {
        // Sentinels outside the checked result set; overwritten by the reader threads.
        let mut a = 1234;
        let mut b = 1234;
        let ids = [
            // Reader of `Y`.
            spawn_pthread_closure(|| {
                a = Y.load(Acquire);
            }),
            // Reader of `X`.
            spawn_pthread_closure(|| {
                b = X.load(Acquire);
            }),
            // Writer of `X`.
            spawn_pthread_closure(|| {
                X.store(1, Release);
            }),
            // Writer of `Y`.
            spawn_pthread_closure(|| {
                Y.store(1, Release);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // the two loads are independent, so each may read 0 or 1 (all four combinations).
        if !matches!((a, b), (0, 0) | (0, 1) | (1, 0) | (1, 1)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 4

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 4

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 4

View file

@ -0,0 +1,54 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@revisions: release1 release2
// Translated from GenMC's test "2+2W+3sc+rel1" and "2+2W+3sc+rel2" (two variants that swap which store is `Release`).
//
// The pass tests "2w2w_3sc_1rel.rs", "2w2w_4rel" and "2w2w_4sc" and the fail test "2w2w_weak.rs" are related.
// Check "2w2w_weak.rs" for a more detailed description.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
static Y: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    unsafe {
        // 2+2W shape with three `SeqCst` stores and one `Release` store; the two
        // revisions only differ in *which* of the second thread's stores is `Release`.
        let ids = [
            spawn_pthread_closure(|| {
                X.store(1, SeqCst);
                Y.store(2, SeqCst);
            }),
            // Variant 1: `Release` goes first.
            #[cfg(release1)]
            spawn_pthread_closure(|| {
                Y.store(1, Release);
                X.store(2, SeqCst);
            }),
            // Variant 2: `Release` goes second.
            #[cfg(not(release1))]
            spawn_pthread_closure(|| {
                Y.store(1, SeqCst);
                X.store(2, Release);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // with one store weakened to `Release`, all four final-value combinations
        // (including (1, 1)) are allowed — see the related "2w2w_weak.rs" fail test.
        let result = (X.load(Relaxed), Y.load(Relaxed));
        if !matches!(result, (1, 2) | (1, 1) | (2, 2) | (2, 1)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,45 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "2+2W".
//
// The pass tests "2w2w_3sc_1rel.rs", "2w2w_4rel" and "2w2w_4sc" and the fail test "2w2w_weak.rs" are related.
// Check "2w2w_weak.rs" for a more detailed description.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
static Y: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    unsafe {
        // 2+2W shape with all four stores `Release`:
        // two threads store to `Y` and `X` in opposite orders.
        let ids = [
            spawn_pthread_closure(|| {
                Y.store(1, Release);
                X.store(2, Release);
            }),
            spawn_pthread_closure(|| {
                X.store(1, Release);
                Y.store(2, Release);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // with only `Release` stores, all four final-value combinations are allowed,
        // matching the 4 executions in the expected GenMC output.
        let result = (X.load(Relaxed), Y.load(Relaxed));
        if !matches!(result, (1, 2) | (1, 1) | (2, 2) | (2, 1)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 4

View file

@ -0,0 +1,45 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "2+2W+4c".
//
// The pass tests "2w2w_3sc_1rel.rs", "2w2w_4rel" and "2w2w_4sc" and the fail test "2w2w_weak.rs" are related.
// Check "2w2w_weak.rs" for a more detailed description.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
static Y: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    unsafe {
        // 2+2W shape with all four stores `SeqCst`.
        let ids = [
            spawn_pthread_closure(|| {
                X.store(1, SeqCst);
                Y.store(2, SeqCst);
            }),
            spawn_pthread_closure(|| {
                Y.store(1, SeqCst);
                X.store(2, SeqCst);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // under full `SeqCst`, (1, 1) is impossible (each thread's *first* store would
        // have to be coherence-last on its location), leaving the 3 listed outcomes —
        // matching the 3 executions in the expected GenMC output.
        let result = (X.load(Relaxed), Y.load(Relaxed));
        if !matches!(result, (2, 1) | (2, 2) | (1, 2)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 3

View file

@ -0,0 +1,64 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "IRIW-acq-sc" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
static Y: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove these initializing writes once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    Y.store(0, Relaxed);
    unsafe {
        // Sentinels outside the checked result set; overwritten by the reader threads.
        let mut a = 1234;
        let mut b = 1234;
        let mut c = 1234;
        let ids = [
            // Writer of `X`.
            spawn_pthread_closure(|| {
                X.store(1, SeqCst);
            }),
            // Reader 1: reads `X`, then `Y` (the `Y` result is deliberately discarded).
            spawn_pthread_closure(|| {
                a = X.load(Acquire);
                Y.load(SeqCst);
            }),
            // Reader 2: reads `Y`, then `X`.
            spawn_pthread_closure(|| {
                b = Y.load(Acquire);
                c = X.load(SeqCst);
            }),
            // Writer of `Y`.
            spawn_pthread_closure(|| {
                Y.store(1, SeqCst);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // each recorded load can see 0 or 1, so all 8 combinations are listed here.
        if !matches!(
            (a, b, c),
            (0, 0, 0)
                | (0, 0, 1)
                | (0, 1, 0)
                | (0, 1, 1)
                | (1, 0, 0)
                | (1, 0, 1)
                | (1, 1, 0)
                | (1, 1, 1)
        ) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 16

View file

@ -0,0 +1,47 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/LB" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
static Y: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove these initializing writes once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    Y.store(0, Relaxed);
    unsafe {
        // Sentinels outside the checked result set; overwritten by the threads below.
        let mut a = 1234;
        let mut b = 1234;
        // Load-buffering (LB) shape: each thread loads one location, then stores to the other.
        let ids = [
            spawn_pthread_closure(|| {
                a = Y.load(Acquire);
                X.store(2, Release);
            }),
            spawn_pthread_closure(|| {
                b = X.load(Acquire);
                Y.store(1, Release);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // (1, 2) — each load reading the *other* thread's later store — would form a
        // dependency cycle and is not allowed, leaving the 3 listed outcomes.
        if !matches!((a, b), (0, 0) | (0, 2) | (1, 0)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 3

View file

@ -0,0 +1,47 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/MP" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
static Y: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove these initializing writes once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    Y.store(0, Relaxed);
    unsafe {
        // Sentinels outside the checked result set; overwritten by the reader thread.
        let mut a = 1234;
        let mut b = 1234;
        // Message-passing (MP) shape: one thread writes data (`X`) then flag (`Y`);
        // the other reads flag then data.
        let ids = [
            spawn_pthread_closure(|| {
                X.store(1, Release);
                Y.store(1, Release);
            }),
            spawn_pthread_closure(|| {
                a = Y.load(Acquire);
                b = X.load(Acquire);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // (1, 0) is forbidden — acquiring the flag (`a == 1`) synchronizes-with the
        // writer's release store, so the earlier data write to `X` must be visible.
        if !matches!((a, b), (0, 0) | (0, 1) | (1, 1)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 3

View file

@ -0,0 +1,47 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/SB" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
static Y: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove these initializing writes once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    Y.store(0, Relaxed);
    unsafe {
        // Sentinels outside the checked result set; overwritten by the threads below.
        let mut a = 1234;
        let mut b = 1234;
        // Store-buffering (SB) shape: each thread stores to one location, then loads the other.
        let ids = [
            spawn_pthread_closure(|| {
                X.store(1, Release);
                a = Y.load(Acquire);
            }),
            spawn_pthread_closure(|| {
                Y.store(1, Release);
                b = X.load(Acquire);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // without `SeqCst`, even (0, 0) (both loads missing the other thread's store)
        // is allowed, so all four combinations are listed (4 executions).
        if !matches!((a, b), (0, 0) | (0, 1) | (1, 0) | (1, 1)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 4

View file

@ -0,0 +1,45 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRR" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove this initializing write once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    unsafe {
        // Sentinels outside the checked result set; overwritten by the reader thread.
        let mut a = 1234;
        let mut b = 1234;
        let ids = [
            // Writer: two successive stores to the same location.
            spawn_pthread_closure(|| {
                X.store(1, Release);
                X.store(2, Release);
            }),
            // Reader: two ordered loads of that location.
            spawn_pthread_closure(|| {
                a = X.load(Acquire);
                b = X.load(Acquire);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // read-read coherence ("CoRR") forbids the second load from observing an older
        // store than the first, ruling out e.g. (1, 0) or (2, 1) — 6 allowed results.
        if !matches!((a, b), (0, 0) | (0, 1) | (0, 2) | (1, 1) | (1, 2) | (2, 2)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 6

View file

@ -0,0 +1,46 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRR0" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove this initializing write once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    unsafe {
        // Sentinels outside the checked result set; overwritten by the reader threads.
        let mut a = 1234;
        let mut b = 1234;
        let ids = [
            // One writer and two independent single-load readers of `X`.
            spawn_pthread_closure(|| {
                X.store(1, Release);
            }),
            spawn_pthread_closure(|| {
                a = X.load(Acquire);
            }),
            spawn_pthread_closure(|| {
                b = X.load(Acquire);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        // the two readers are unordered, so each independently sees 0 or 1 (4 executions).
        if !matches!((a, b), (0, 0) | (0, 1) | (1, 0) | (1, 1)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 4

View file

@ -0,0 +1,58 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRR1" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
// Custom entry point, since the test is built with `#![no_main]`.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove this initializing write once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    unsafe {
        // Sentinels outside the checked value range; overwritten by the reader threads.
        let mut a = 1234;
        let mut b = 1234;
        let mut c = 1234;
        let ids = [
            // Two writers storing distinct values to `X`.
            spawn_pthread_closure(|| {
                X.store(1, Release);
            }),
            spawn_pthread_closure(|| {
                X.store(2, Release);
            }),
            // Reader with two ordered loads (subject to read-read coherence).
            spawn_pthread_closure(|| {
                a = X.load(Acquire);
                b = X.load(Acquire);
            }),
            // Independent single-load reader.
            spawn_pthread_closure(|| {
                c = X.load(Acquire);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values (only 0, 1, 2 are allowed):
        if !(matches!(a, 0..=2) && matches!(b, 0..=2) && matches!(c, 0..=2)) {
            std::process::abort();
        }
        // The 36 possible program executions can have 21 different results for (a, b, c).
        // Of the 27 = 3*3*3 total results for (a, b, c),
        // those where `a != 0` and `b == 0` are not allowed by the memory model.
        // Once the load for `a` reads either 1 or 2, the load for `b` must see that store too, so it cannot read 0.
        if a != 0 && b == 0 {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 36

View file

@ -0,0 +1,70 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRR2" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
/// Entry point for Miri (`miri_start` replaces `main` because of `#![no_main]`).
///
/// Translated from GenMC's "CoRR2" litmus test: two writer threads store
/// distinct values to `X`, and two reader threads each load `X` twice.
/// Coherence forbids the two readers from observing the stores in opposite
/// orders; we abort on any such outcome.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove this initializing write once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    unsafe {
        // Sentinels that no store below can produce; overwritten by the reader threads.
        let mut a = 1234;
        let mut b = 1234;
        let mut c = 1234;
        let mut d = 1234;
        let ids = [
            spawn_pthread_closure(|| {
                X.store(1, Release);
            }),
            spawn_pthread_closure(|| {
                X.store(2, Release);
            }),
            spawn_pthread_closure(|| {
                a = X.load(Acquire);
                b = X.load(Acquire);
            }),
            spawn_pthread_closure(|| {
                c = X.load(Acquire);
                d = X.load(Acquire);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values (only 0, 1, 2 are allowed):
        if !(matches!(a, 0..=2) && matches!(b, 0..=2) && matches!(c, 0..=2) && matches!(d, 0..=2)) {
            std::process::abort();
        }
        // The 72 possible program executions can have 47 different results for (a, b, c, d).
        // Of the 81 = 3*3*3*3 total results for (a, b, c, d),
        // those where `a != 0` and `b == 0` are not allowed by the memory model.
        // Once the load for `a` reads either 1 or 2, the load for `b` must see that store too, so it cannot read 0.
        // The same applies to `c, d` in the other thread.
        //
        // Additionally, if one thread reads `1, 2` or `2, 1`, the other thread cannot see the opposite order.
        if a != 0 && b == 0 {
            std::process::abort();
        } else if c != 0 && d == 0 {
            std::process::abort();
        } else if (a, b) == (1, 2) && (c, d) == (2, 1) {
            std::process::abort();
        } else if (a, b) == (2, 1) && (c, d) == (1, 2) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 72

View file

@ -0,0 +1,43 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRW" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
/// Entry point for Miri (`miri_start` replaces `main` because of `#![no_main]`).
///
/// Translated from GenMC's "CoRW" litmus test: one thread loads `X` and then
/// stores 1 to it; a second thread stores 2. The load cannot observe the
/// store that is placed after it in its own thread.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove this initializing write once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    unsafe {
        // Sentinel that no store below can produce; overwritten by the load.
        let mut a = 1234;
        let ids = [
            spawn_pthread_closure(|| {
                a = X.load(Acquire);
                X.store(1, Release);
            }),
            spawn_pthread_closure(|| {
                X.store(2, Release);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values (the load cannot read `1`):
        if !matches!(a, 0 | 2) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 3

View file

@ -0,0 +1,40 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoWR" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
/// Entry point for Miri (`miri_start` replaces `main` because of `#![no_main]`).
///
/// Translated from GenMC's "CoWR" litmus test: one thread stores 1 to `X` and
/// then loads it back; a second thread stores 2. Coherence requires the load
/// to see its own thread's store or a later one, never the initial 0.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    unsafe {
        // Sentinel that no store below can produce; overwritten by the load.
        let mut a = 1234;
        let ids = [
            spawn_pthread_closure(|| {
                X.store(1, Release);
                a = X.load(Acquire);
            }),
            spawn_pthread_closure(|| {
                X.store(2, Release);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values (the load cannot read `0`):
        if !matches!(a, 1 | 2) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 3

View file

@ -0,0 +1,47 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/default" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
/// Entry point for Miri (`miri_start` replaces `main` because of `#![no_main]`).
///
/// Translated from GenMC's "litmus/default" test: one thread loads `X` twice
/// while two other threads store distinct values to it. The second load may
/// not observe an older store than the first load did.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove this initializing write once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    unsafe {
        // Sentinels that no store below can produce; overwritten by the reader thread.
        let mut a = 1234;
        let mut b = 1234;
        let ids = [
            spawn_pthread_closure(|| {
                a = X.load(Acquire);
                b = X.load(Acquire);
            }),
            spawn_pthread_closure(|| {
                X.store(1, Release);
            }),
            spawn_pthread_closure(|| {
                X.store(2, Release);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        if !matches!((a, b), (0, 0) | (0, 1) | (0, 2) | (1, 1) | (1, 2) | (2, 1) | (2, 2)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 12

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 9

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 9

View file

@ -0,0 +1,68 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@revisions: join no_join
// Translated from GenMC's "litmus/detour" test.
// This test has two revisitions to test whether we get the same result
// independent of whether we join the spawned threads or not.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicI64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicI64 = AtomicI64::new(0);
static Y: AtomicI64 = AtomicI64::new(0);
static Z: AtomicI64 = AtomicI64::new(0);
/// Entry point for Miri (`miri_start` replaces `main` because of `#![no_main]`).
///
/// Translated from GenMC's "litmus/detour" test: three threads communicate
/// through the relaxed atomics `X`, `Y`, `Z`. The `no_join` revision returns
/// from the main thread without joining, to check that the same executions
/// are explored either way (see the sibling expected-output files).
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove these initializing writes once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    Y.store(0, Relaxed);
    Z.store(0, Relaxed);
    unsafe {
        // Make these static so we can exit the main thread while the other threads still run.
        // If these are `let mut` like the other tests, this will cause a use-after-free bug.
        static mut A: i64 = 1234;
        static mut B: i64 = 1234;
        static mut C: i64 = 1234;
        let ids = [
            spawn_pthread_closure(|| {
                X.store(1, Relaxed);
            }),
            spawn_pthread_closure(|| {
                A = Z.load(Relaxed);
                // `wrapping_sub` keeps the `Z == 0` case (yielding -1) free of overflow panics.
                X.store(A.wrapping_sub(1), Relaxed);
                B = X.load(Relaxed);
                Y.store(B, Relaxed);
            }),
            spawn_pthread_closure(|| {
                C = Y.load(Relaxed);
                Z.store(C, Relaxed);
            }),
        ];
        // The `no_join` revision doesn't join any of the running threads to test that
        // we still explore the same number of executions in that case.
        if cfg!(no_join) {
            return 0;
        }
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        if !matches!((A, B, C), (0, 1, 0) | (0, -1, 0) | (0, 1, 1) | (0, -1, -1)) {
            std::process::abort();
        }
        0
    }
}

View file

@ -0,0 +1,54 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "fr+w+w+w+reads" test.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::*;
use crate::genmc::*;
static X: AtomicU64 = AtomicU64::new(0);
/// Entry point for Miri (`miri_start` replaces `main` because of `#![no_main]`).
///
/// Translated from GenMC's "fr+w+w+w+reads" test: three writer threads store
/// distinct values to `X` while a fourth thread loads it four times. Every
/// observed value must be one of the written values or the initial 0.
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // FIXME(genmc,HACK): remove this initializing write once Miri-GenMC supports mixed atomic-non-atomic accesses.
    X.store(0, Relaxed);
    unsafe {
        // Sentinels that no store below can produce; overwritten by the reader thread.
        let mut result = [1234; 4];
        let ids = [
            spawn_pthread_closure(|| {
                X.store(1, Relaxed);
            }),
            spawn_pthread_closure(|| {
                X.store(2, Relaxed);
            }),
            spawn_pthread_closure(|| {
                X.store(3, Relaxed);
            }),
            spawn_pthread_closure(|| {
                result[0] = X.load(Relaxed);
                result[1] = X.load(Relaxed);
                result[2] = X.load(Relaxed);
                result[3] = X.load(Relaxed);
            }),
        ];
        // Join so we can read the final values.
        join_pthreads(ids);
        // Check that we don't get any unexpected values:
        for val in result {
            if !matches!(val, 0..=3) {
                std::process::abort();
            }
        }
        0
    }
}

View file

@ -0,0 +1,2 @@
(GenMC) Verification complete. No errors were detected.
Number of complete executions explored: 210

View file

@ -1,8 +0,0 @@
//@compile-flags: -Zmiri-genmc
#![no_main]
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
0
}

View file

@ -1,5 +0,0 @@
warning: borrow tracking has been disabled, it is not (yet) supported in GenMC mode.
C++: GenMC handle created!
Miri: GenMC handle creation successful!
C++: GenMC handle destroyed!
Miri: Dropping GenMC handle successful!

View file

@ -0,0 +1,57 @@
#![allow(unused)]
use std::ffi::c_void;
use libc::{self, pthread_attr_t, pthread_t};
/// Spawn a thread using `pthread_create`, abort the process on any errors.
///
/// # Safety
/// The caller must ensure `f` is safe to run concurrently with argument
/// `value` for as long as the spawned thread may execute.
pub unsafe fn spawn_pthread(
    f: extern "C" fn(*mut c_void) -> *mut c_void,
    value: *mut c_void,
) -> pthread_t {
    let mut thread_id: pthread_t = 0;
    // A null attribute pointer asks pthreads for the default thread attributes.
    let attr: *const pthread_attr_t = std::ptr::null();
    let ret = unsafe { libc::pthread_create(&raw mut thread_id, attr, f, value) };
    if ret != 0 {
        std::process::abort();
    }
    thread_id
}
/// Spawn a pthread running the given closure, abort the process on any errors.
///
/// # Safety
/// Unsafe because we do *not* check that `F` is `Send + 'static`.
/// That makes it much easier to write tests...
pub unsafe fn spawn_pthread_closure<F: FnOnce()>(f: F) -> pthread_t {
    // Trampoline passed to `pthread_create`: reclaims the boxed closure and runs it.
    extern "C" fn thread_func<F: FnOnce()>(f: *mut c_void) -> *mut c_void {
        let closure = unsafe { Box::from_raw(f as *mut F) };
        closure();
        std::ptr::null_mut()
    }
    // Move the closure to the heap and leak it into the thread argument;
    // `thread_func` takes ownership back on the new thread.
    let data = Box::into_raw(Box::new(f)) as *mut c_void;
    let attr: *const pthread_attr_t = std::ptr::null();
    let mut thread_id: pthread_t = 0;
    let ret = unsafe { libc::pthread_create(&raw mut thread_id, attr, thread_func::<F>, data) };
    if ret != 0 {
        std::process::abort();
    }
    thread_id
}
/// Join the given pthread, abort the process on any errors.
///
/// # Safety
/// `thread_id` must refer to a joinable thread that has not already been
/// joined or detached.
pub unsafe fn join_pthread(thread_id: pthread_t) {
    // Null second argument: we discard the thread's return value.
    if unsafe { libc::pthread_join(thread_id, std::ptr::null_mut()) } != 0 {
        std::process::abort();
    }
}
/// Join the `N` given pthreads, abort the process on any errors.
///
/// # Safety
/// Every id in `thread_ids` must refer to a joinable thread that has not
/// already been joined or detached.
pub unsafe fn join_pthreads<const N: usize>(thread_ids: [pthread_t; N]) {
    // A plain loop instead of `array::map` — we only want the side effect of
    // joining, not an array of `()` results.
    for id in thread_ids {
        // SAFETY: caller guarantees each id is a joinable, unjoined thread.
        unsafe { join_pthread(id) };
    }
}