Rollup merge of #152700 - RalfJung:miri, r=RalfJung

miri subtree update

Subtree update of `miri` to ef30e906aa.

Created using https://github.com/rust-lang/josh-sync.

r? @ghost
This commit is contained in:
Stuart Cook 2026-02-17 13:02:20 +11:00 committed by GitHub
commit 476a6308f1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
56 changed files with 390 additions and 561 deletions

View file

@ -66,6 +66,23 @@ process for such contributions:
This process is largely informal, and its primary goal is to more clearly communicate expectations.
Please get in touch with us if you have any questions!
## Scope of Miri shims
Miri has "shims" to implement functionality that is usually implemented in C libraries which are
invoked from Rust code, such as opening files or spawning threads, as well as for
CPU-vendor-provided SIMD intrinsics. However, the set of C functions that Rust code invokes this way
is enormous, and for obvious reasons we have no intention of implementing every C API ever written
in Miri.
At the moment, the general guideline for "could this function have a shim in Miri" is: we will
generally only add shims for functions that can be implemented in a portable way using just what is
provided by the Rust standard library. The function should also be reasonably widely-used in Rust
code to justify the review and maintenance effort (i.e. the easier the function is to implement, the
lower the barrier). Other than that, we might make exceptions for certain cases if (a) there is a
good case for why Miri should support those APIs, and (b) robust and widely-used portable libraries
exist in the Rust ecosystem. We will generally not add shims to Miri that would require Miri to
directly interact with platform-specific APIs (such as `libc` or `windows-sys`).
## Preparing the build environment
Miri heavily relies on internal and unstable rustc interfaces to execute MIR,

View file

@ -562,9 +562,9 @@ checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7"
[[package]]
name = "git2"
version = "0.20.2"
version = "0.20.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110"
checksum = "7b88256088d75a56f8ecfa070513a775dd9107f6530ef14919dac831af9cfe2b"
dependencies = [
"bitflags",
"libc",
@ -804,9 +804,9 @@ dependencies = [
[[package]]
name = "libgit2-sys"
version = "0.18.2+1.9.1"
version = "0.18.3+1.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222"
checksum = "c9b3acc4b91781bb0b3386669d325163746af5f6e4f73e6d2d630e09a35f3487"
dependencies = [
"cc",
"libc",

View file

@ -39,7 +39,7 @@ serde = { version = "1.0.219", features = ["derive"], optional = true }
[target.'cfg(target_os = "linux")'.dependencies]
nix = { version = "0.30.1", features = ["mman", "ptrace", "signal"], optional = true }
ipc-channel = { version = "0.20.0", optional = true }
capstone = { version = "0.14", optional = true }
capstone = { version = "0.14", features = ["arch_x86", "full"], default-features = false, optional = true}
[target.'cfg(all(target_os = "linux", target_pointer_width = "64", target_endian = "little"))'.dependencies]
genmc-sys = { path = "./genmc-sys/", version = "0.1.0", optional = true }

View file

@ -626,6 +626,8 @@ Definite bugs found:
* [`ReentrantLock` not correctly dealing with reuse of addresses for TLS storage of different threads](https://github.com/rust-lang/rust/pull/141248)
* [Rare Deadlock in the thread (un)parking example code](https://github.com/rust-lang/rust/issues/145816)
* [`winit` registering a global constructor with the wrong ABI on Windows](https://github.com/rust-windowing/winit/issues/4435)
* [`VecDeque::splice` confusing physical and logical indices](https://github.com/rust-lang/rust/issues/151758)
* [Data race in `oneshot` channel](https://github.com/faern/oneshot/issues/69)
Violations of [Stacked Borrows] found that are likely bugs (but Stacked Borrows is currently just an experiment):

View file

@ -63,6 +63,9 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
"setup" => MiriCommand::Setup,
"test" | "t" | "run" | "r" | "nextest" => MiriCommand::Forward(subcommand),
"clean" => MiriCommand::Clean,
// For use by the `./miri test` dependency builder.
"build" if env::var_os("MIRI_BUILD_TEST_DEPS").is_some() =>
MiriCommand::Forward("build".into()),
_ => {
// Check for version and help flags.
if has_arg_flag("--help") || has_arg_flag("-h") {
@ -309,6 +312,7 @@ pub fn phase_rustc(args: impl Iterator<Item = String>, phase: RustcPhase) {
// Ask rustc for the filename (since that is target-dependent).
let mut rustc = miri_for_host(); // sysroot doesn't matter for this so we just use the host
rustc.arg("--print").arg("file-names");
rustc.arg("-Zunstable-options"); // needed for JSON targets
for flag in ["--crate-name", "--crate-type", "--target"] {
for val in get_arg_flag_values(flag) {
rustc.arg(flag).arg(val);

View file

@ -88,6 +88,11 @@ pub fn setup(
};
let cargo_cmd = {
let mut command = cargo();
// Allow JSON targets since users do not have a good way to set this flag otherwise.
if env::var("RUSTC_STAGE").is_err() {
// ^ is a HACK for bootstrap cargo. FIXME(cfg(bootstrap)) remove the hack.
command.arg("-Zjson-target-spec");
}
// Use Miri as rustc to build a libstd compatible with us (and use the right flags).
// We set ourselves (`cargo-miri`) instead of Miri directly to be able to patch the flags
// for `libpanic_abort` (usually this is done by bootstrap but we have to do it ourselves).

View file

@ -129,7 +129,7 @@ function run_tests_minimal {
time ./miri test $TARGET_FLAG "$@"
# Ensure that a small smoke test of cargo-miri works.
time cargo miri run --manifest-path test-cargo-miri/no-std-smoke/Cargo.toml $TARGET_FLAG
time cargo miri run --manifest-path test-cargo-miri/no-std-smoke/Cargo.toml -Zjson-target-spec $TARGET_FLAG
endgroup
}
@ -173,7 +173,9 @@ case $HOST_TARGET in
# Host
MIR_OPT=1 MANY_SEEDS=64 TEST_BENCH=1 CARGO_MIRI_ENV=1 run_tests
# Custom target JSON file
TEST_TARGET=tests/x86_64-unknown-kernel.json MIRI_NO_STD=1 run_tests_minimal no_std
TEST_TARGET=tests/x86_64-unknown-kernel.json MIRI_NO_STD=1 MIRIFLAGS="-Zunstable-options" run_tests_minimal no_std
# Not officially supported tier 2
MANY_SEEDS=16 TEST_TARGET=x86_64-pc-solaris run_tests
;;
aarch64-apple-darwin)
# Host
@ -184,7 +186,6 @@ case $HOST_TARGET in
# Not officially supported tier 2
MANY_SEEDS=16 TEST_TARGET=mips-unknown-linux-gnu run_tests # a 32bit big-endian target, and also a target without 64bit atomics
MANY_SEEDS=16 TEST_TARGET=x86_64-unknown-illumos run_tests
MANY_SEEDS=16 TEST_TARGET=x86_64-pc-solaris run_tests
;;
i686-pc-windows-msvc)
# Host

View file

@ -1 +1 @@
d10ac47c20152feb5e99b1c35a2e6830f77c66dc
7bee525095c0872e87c038c412c781b9bbb3f5dc

View file

@ -66,10 +66,10 @@ impl IsolatedAlloc {
// And make sure the align is at least one page
let align = std::cmp::max(layout.align(), self.page_size);
// pg_count gives us the # of pages needed to satisfy the size. For
// align > page_size where align = n * page_size, a sufficently-aligned
// align > page_size where align = n * page_size, a sufficiently-aligned
// address must exist somewhere in the range of
// some_page_aligned_address..some_page_aligned_address + (n-1) * page_size
// (since if some_page_aligned_address + n * page_size is sufficently aligned,
// (since if some_page_aligned_address + n * page_size is sufficiently aligned,
// then so is some_page_aligned_address itself per the definition of n, so we
// can avoid using that 1 extra page).
// Thus we allocate n-1 extra pages

View file

@ -2,7 +2,7 @@
//! <https://github.com/tikv/minstant/blob/27c9ec5ec90b5b67113a748a4defee0d2519518c/src/tsc_now.rs>.
//! A useful resource is also
//! <https://www.pingcap.com/blog/how-we-trace-a-kv-database-with-less-than-5-percent-performance-impact/>,
//! although this file does not implement TSC synchronization but insteads pins threads to CPUs,
//! although this file does not implement TSC synchronization but instead pins threads to CPUs,
//! since the former is not reliable (i.e. it might lead to non-monotonic time measurements).
//! Another useful resource for future improvements might be measureme's time measurement utils:
//! <https://github.com/rust-lang/measureme/blob/master/measureme/src/counters.rs>.
@ -11,7 +11,7 @@
#![cfg(feature = "tracing")]
/// This alternative `TracingChromeInstant` implementation was made entirely to suit the needs of
/// [crate::log::tracing_chrome], and shouldn't be used for anything else. It featues two functions:
/// [crate::log::tracing_chrome], and shouldn't be used for anything else. It features two functions:
/// - [TracingChromeInstant::setup_for_thread_and_start], which sets up the current thread to do
/// proper time tracking and returns a point in time to use as "t=0", and
/// - [TracingChromeInstant::with_elapsed_micros_subtracting_tracing], which allows

View file

@ -578,7 +578,7 @@ pub mod diagnostics {
// - created as Reserved { conflicted: false },
// then Unique -> Disabled is forbidden
// A potential `Reserved { conflicted: false }
// -> Reserved { conflicted: true }` is inexistant or irrelevant,
// -> Reserved { conflicted: true }` is inexistent or irrelevant,
// and so is the `Reserved { conflicted: false } -> Unique`
(Unique, Frozen) => false,
(ReservedFrz { conflicted: true }, _) => false,

View file

@ -26,7 +26,7 @@ use super::foreign_access_skipping::IdempotentForeignAccess;
use super::perms::{PermTransition, Permission};
use super::tree_visitor::{ChildrenVisitMode, ContinueTraversal, NodeAppArgs, TreeVisitor};
use super::unimap::{UniIndex, UniKeyMap, UniValMap};
use super::wildcard::WildcardState;
use super::wildcard::ExposedCache;
use crate::borrow_tracker::{AccessKind, GlobalState, ProtectorKind};
use crate::*;
@ -89,7 +89,7 @@ impl LocationState {
&mut self,
idx: UniIndex,
nodes: &mut UniValMap<Node>,
wildcard_accesses: &mut UniValMap<WildcardState>,
exposed_cache: &mut ExposedCache,
access_kind: AccessKind,
relatedness: AccessRelatedness,
protected: bool,
@ -99,7 +99,7 @@ impl LocationState {
// ensures it is only called when `skip_if_known_noop` returns
// `Recurse`, due to the contract of `traverse_this_parents_children_other`.
self.record_new_access(access_kind, relatedness);
let old_access_level = self.permission.strongest_allowed_local_access(protected);
let transition = self.perform_access(access_kind, relatedness, protected)?;
if !transition.is_noop() {
let node = nodes.get_mut(idx).unwrap();
@ -111,8 +111,8 @@ impl LocationState {
// We need to update the wildcard state, if the permission
// of an exposed pointer changes.
if node.is_exposed {
let access_type = self.permission.strongest_allowed_local_access(protected);
WildcardState::update_exposure(idx, access_type, nodes, wildcard_accesses);
let access_level = self.permission.strongest_allowed_local_access(protected);
exposed_cache.update_exposure(nodes, idx, old_access_level, access_level);
}
}
Ok(())
@ -226,7 +226,7 @@ impl LocationState {
/// Records a new access, so that future access can potentially be skipped
/// by `skip_if_known_noop`. This must be called on child accesses, and otherwise
/// shoud be called on foreign accesses for increased performance. It should not be called
/// should be called on foreign accesses for increased performance. It should not be called
/// when `skip_if_known_noop` indicated skipping, since it then is a no-op.
/// See `foreign_access_skipping.rs`
fn record_new_access(&mut self, access_kind: AccessKind, rel_pos: AccessRelatedness) {
@ -261,14 +261,8 @@ pub struct LocationTree {
///
/// We do uphold the fact that `keys(perms)` is a subset of `keys(nodes)`
pub perms: UniValMap<LocationState>,
/// Maps a tag and a location to its wildcard access tracking information,
/// with possible lazy initialization.
///
/// If this allocation doesn't have any exposed nodes, then this map doesn't get
/// initialized. This way we only need to allocate the map if we need it.
///
/// NOTE: same guarantees on entry initialization as for `perms`.
pub wildcard_accesses: UniValMap<WildcardState>,
/// Caches information about the relatedness of nodes for a wildcard access.
pub exposed_cache: ExposedCache,
}
/// Tree structure with both parents and children since we want to be
/// able to traverse the tree efficiently in both directions.
@ -276,7 +270,7 @@ pub struct LocationTree {
pub struct Tree {
/// Mapping from tags to keys. The key obtained can then be used in
/// any of the `UniValMap` relative to this allocation, i.e.
/// `nodes`, `LocationTree::perms` and `LocationTree::wildcard_accesses`
/// `nodes`, `LocationTree::perms` and `LocationTree::exposed_cache`
/// of the same `Tree`.
/// The parent-child relationship in `Node` is encoded in terms of these same
/// keys, so traversing the entire tree needs exactly one access to
@ -372,8 +366,8 @@ impl Tree {
IdempotentForeignAccess::None,
),
);
let wildcard_accesses = UniValMap::default();
DedupRangeMap::new(size, LocationTree { perms, wildcard_accesses })
let exposed_cache = ExposedCache::default();
DedupRangeMap::new(size, LocationTree { perms, exposed_cache })
};
Self { roots: SmallVec::from_slice(&[root_idx]), nodes, locations, tag_mapping }
}
@ -451,19 +445,9 @@ impl<'tcx> Tree {
}
}
// We need to ensure the consistency of the wildcard access tracking data structure.
// For this, we insert the correct entry for this tag based on its parent, if it exists.
// If we are inserting a new wildcard root (with Wildcard as parent_prov) then we insert
// the special wildcard root initial state instead.
for (_range, loc) in self.locations.iter_mut_all() {
if let Some(parent_idx) = parent_idx {
if let Some(parent_access) = loc.wildcard_accesses.get(parent_idx) {
loc.wildcard_accesses.insert(idx, parent_access.for_new_child());
}
} else {
loc.wildcard_accesses.insert(idx, WildcardState::for_wildcard_root());
}
}
// We don't have to update `exposed_cache` as the new node is not exposed and
// has no children so the default counts of 0 are correct.
// If the parent is a wildcard pointer, then it doesn't track SIFA and doesn't need to be updated.
if let Some(parent_idx) = parent_idx {
// Inserting the new perms might have broken the SIFA invariant (see
@ -807,7 +791,7 @@ impl Tree {
let node = self.nodes.remove(this).unwrap();
for (_range, loc) in self.locations.iter_mut_all() {
loc.perms.remove(this);
loc.wildcard_accesses.remove(this);
loc.exposed_cache.remove(this);
}
self.tag_mapping.remove(&node.tag);
}
@ -943,7 +927,7 @@ impl<'tcx> LocationTree {
};
let accessed_root_tag = accessed_root.map(|idx| nodes.get(idx).unwrap().tag);
for root in roots {
for (i, root) in roots.enumerate() {
let tag = nodes.get(root).unwrap().tag;
// On a protector release access we have to skip the children of the accessed tag.
// However, if the tag has exposed children then some of the wildcard subtrees could
@ -981,6 +965,7 @@ impl<'tcx> LocationTree {
access_kind,
global,
diagnostics,
/*is_wildcard_tree*/ i != 0,
)?;
}
interp_ok(())
@ -1029,7 +1014,7 @@ impl<'tcx> LocationTree {
.perform_transition(
args.idx,
args.nodes,
&mut args.data.wildcard_accesses,
&mut args.data.exposed_cache,
access_kind,
args.rel_pos,
protected,
@ -1074,12 +1059,18 @@ impl<'tcx> LocationTree {
access_kind: AccessKind,
global: &GlobalState,
diagnostics: &DiagnosticInfo,
is_wildcard_tree: bool,
) -> InterpResult<'tcx> {
let get_relatedness = |idx: UniIndex, node: &Node, loc: &LocationTree| {
let wildcard_state = loc.wildcard_accesses.get(idx).cloned().unwrap_or_default();
// If the tag is larger than `max_local_tag` then the access can only be foreign.
let only_foreign = max_local_tag.is_some_and(|max_local_tag| max_local_tag < node.tag);
wildcard_state.access_relatedness(access_kind, only_foreign)
loc.exposed_cache.access_relatedness(
root,
idx,
access_kind,
is_wildcard_tree,
only_foreign,
)
};
// Whether there is an exposed node in this tree that allows this access.
@ -1156,7 +1147,7 @@ impl<'tcx> LocationTree {
perm.perform_transition(
args.idx,
args.nodes,
&mut args.data.wildcard_accesses,
&mut args.data.exposed_cache,
access_kind,
relatedness,
protected,
@ -1175,19 +1166,11 @@ impl<'tcx> LocationTree {
})
},
)?;
// If there is no exposed node in this tree that allows this access, then the
// access *must* be foreign. So we check if the root of this tree would allow this
// as a foreign access, and if not, then we can error.
// In practice, all wildcard trees accept foreign accesses, but the main tree does
// not, so this catches UB when none of the nodes in the main tree allows this access.
if !has_valid_exposed
&& self
.wildcard_accesses
.get(root)
.unwrap()
.access_relatedness(access_kind, /* only_foreign */ true)
.is_none()
{
// If there is no exposed node in this tree that allows this access, then the access *must*
// be foreign to the entire subtree. Foreign accesses are only possible on wildcard subtrees
// as there are no ancestors to the main root. So if we do not find a valid exposed node in
// the main tree then this access is UB.
if !has_valid_exposed && !is_wildcard_tree {
return Err(no_valid_exposed_references_error(diagnostics)).into();
}
interp_ok(())

View file

@ -741,7 +741,7 @@ mod spurious_read {
);
eprintln!(" (arbitrary code instanciated with '{opaque}')");
err += 1;
// We found an instanciation of the opaque code that makes this Pattern
// We found an instantiation of the opaque code that makes this Pattern
// fail, we don't really need to check the rest.
break;
}

View file

@ -99,7 +99,7 @@ where
assert!(self.stack.is_empty());
// First, handle accessed node. A bunch of things need to
// be handled differently here compared to the further parents
// of `accesssed_node`.
// of `accessed_node`.
{
self.propagate_at(this, accessed_node, AccessRelatedness::LocalAccess)?;
if matches!(visit_children, ChildrenVisitMode::VisitChildrenOfAccessed) {

View file

@ -1,4 +1,3 @@
use std::cmp::max;
use std::fmt::Debug;
use super::Tree;
@ -51,373 +50,141 @@ impl WildcardAccessRelatedness {
}
}
/// Caches information about where in the tree exposed nodes with permission to do reads/writes are
/// located. [`ExposedCache`] stores this information for a single location (or rather, a range of
/// homogeneous locations) for all nodes in an allocation.
///
/// Nodes not in this map have a default [`ExposedCacheNode`], i.e. they have no exposed children.
/// In particular, this map remains empty (and thus consumes no memory) until the first
/// node in the tree gets exposed.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct ExposedCache(UniValMap<ExposedCacheNode>);
/// State per location per node keeping track of where relative to this
/// node exposed nodes are and what access permissions they have.
///
/// Designed to be completely determined by its parent, siblings and
/// direct children's max_local_access/max_foreign_access.
#[derive(Clone, Default, PartialEq, Eq)]
pub struct WildcardState {
/// How many of this node's direct children have `max_local_access()==Write`.
child_writes: u16,
/// How many of this node's direct children have `max_local_access()>=Read`.
child_reads: u16,
/// The maximum access level that could happen from an exposed node
/// that is foreign to this node.
///
/// This is calculated as the `max()` of the parent's `max_foreign_access`,
/// `exposed_as` and the siblings' `max_local_access()`.
max_foreign_access: WildcardAccessLevel,
/// At what access level this node itself is exposed.
exposed_as: WildcardAccessLevel,
#[derive(Clone, Default, Debug, PartialEq, Eq)]
struct ExposedCacheNode {
/// How many local nodes (in this subtree) are exposed with write permissions.
local_writes: u16,
/// How many local nodes (in this subtree) are exposed with read permissions.
local_reads: u16,
}
impl Debug for WildcardState {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("WildcardState")
.field("child_r/w", &(self.child_reads, self.child_writes))
.field("foreign", &self.max_foreign_access)
.field("exposed_as", &self.exposed_as)
.finish()
}
}
impl WildcardState {
/// The maximum access level that could happen from an exposed
/// node that is local to this node.
fn max_local_access(&self) -> WildcardAccessLevel {
use WildcardAccessLevel::*;
max(
self.exposed_as,
if self.child_writes > 0 {
Write
} else if self.child_reads > 0 {
Read
} else {
None
},
)
}
/// From where relative to the node with this wildcard info a read or write access could happen.
/// If `only_foreign` is true then we treat `LocalAccess` as impossible. This means we return
/// `None` if only a `LocalAccess` is possible, and we treat `EitherAccess` as a
/// `ForeignAccess`.
impl ExposedCache {
/// Returns the relatedness of a wildcard access to a node.
///
/// This function only considers a single subtree. If the current subtree does not contain
/// any valid exposed nodes then the function returns `None`.
///
/// * `root`: The root of the subtree the node belongs to.
/// * `id`: The id of the node.
/// * `kind`: The kind of the wildcard access.
/// * `is_wildcard_tree`: This node belongs to a wildcard subtree.
/// This means we always treat foreign accesses as possible.
/// * `only_foreign`: Assume the access cannot come from a local node.
pub fn access_relatedness(
&self,
root: UniIndex,
id: UniIndex,
kind: AccessKind,
is_wildcard_tree: bool,
only_foreign: bool,
) -> Option<WildcardAccessRelatedness> {
let rel = match kind {
AccessKind::Read => self.read_access_relatedness(),
AccessKind::Write => self.write_access_relatedness(),
// All nodes in the tree are local to the root, so we can use the root to get the total
// number of valid exposed nodes in the tree.
let root = self.0.get(root).cloned().unwrap_or_default();
let node = self.0.get(id).cloned().unwrap_or_default();
let (total_num, local_num) = match kind {
AccessKind::Read => (root.local_reads, node.local_reads),
AccessKind::Write => (root.local_writes, node.local_writes),
};
// If this is a wildcard tree then an access can always be foreign as
// it could come from another tree.
// We can represent this by adding 1 to the total which means there
// always exists a foreign exposed node.
// (We cannot bake this into the root's count as then if `node == root` it would
// affect both `total` and `local`.)
let total_num = total_num + u16::from(is_wildcard_tree);
use WildcardAccessRelatedness::*;
let relatedness = if total_num == 0 {
// we return None if the tree does not contain any valid exposed nodes.
None
} else {
Some(if total_num == local_num {
// If all valid exposed nodes are local to this node then the access is local.
LocalAccess
} else if local_num == 0 {
// If the node does not have any exposed nodes as children then the access is foreign.
ForeignAccess
} else {
// If some but not all of the valid exposed nodes are local then we cannot determine the correct relatedness.
EitherAccess
})
};
if only_foreign {
use WildcardAccessRelatedness as E;
match rel {
Some(E::EitherAccess | E::ForeignAccess) => Some(E::ForeignAccess),
Some(E::LocalAccess) | None => None,
// This is definitely not a local access; clamp the result accordingly.
match relatedness {
Some(LocalAccess) => None,
Some(ForeignAccess) => Some(ForeignAccess),
Some(EitherAccess) => Some(ForeignAccess),
None => None,
}
} else {
rel
relatedness
}
}
/// From where relative to the node with this wildcard info a read access could happen.
fn read_access_relatedness(&self) -> Option<WildcardAccessRelatedness> {
let has_foreign = self.max_foreign_access >= WildcardAccessLevel::Read;
let has_local = self.max_local_access() >= WildcardAccessLevel::Read;
use WildcardAccessRelatedness as E;
match (has_foreign, has_local) {
(true, true) => Some(E::EitherAccess),
(true, false) => Some(E::ForeignAccess),
(false, true) => Some(E::LocalAccess),
(false, false) => None,
}
}
/// From where relative to the node with this wildcard info a write access could happen.
fn write_access_relatedness(&self) -> Option<WildcardAccessRelatedness> {
let has_foreign = self.max_foreign_access == WildcardAccessLevel::Write;
let has_local = self.max_local_access() == WildcardAccessLevel::Write;
use WildcardAccessRelatedness as E;
match (has_foreign, has_local) {
(true, true) => Some(E::EitherAccess),
(true, false) => Some(E::ForeignAccess),
(false, true) => Some(E::LocalAccess),
(false, false) => None,
}
}
/// Gets the access tracking information for a new child node of a parent with this
/// wildcard info.
/// The new node doesn't have any child reads/writes, but calculates `max_foreign_access`
/// from its parent.
pub fn for_new_child(&self) -> Self {
Self {
max_foreign_access: max(self.max_foreign_access, self.max_local_access()),
..Default::default()
}
}
/// Crates the initial `WildcardState` for a wildcard root.
/// This has `max_foreign_access==Write` as it actually is the child of *some* exposed node
/// through which we can receive foreign accesses.
///
/// This is different from the main root which has `max_foreign_access==None`, since there
/// cannot be a foreign access to the root of the allocation.
pub fn for_wildcard_root() -> Self {
Self { max_foreign_access: WildcardAccessLevel::Write, ..Default::default() }
}
/// Pushes the nodes of `children` onto the stack who's `max_foreign_access`
/// needs to be updated.
///
/// * `children`: A list of nodes with the same parent. `children` doesn't
/// necessarily have to contain all children of parent, but can just be
/// a subset.
///
/// * `child_reads`, `child_writes`: How many of `children` have `max_local_access()`
/// of at least `read`/`write`
///
/// * `new_foreign_access`, `old_foreign_access`:
/// The max possible access level that is foreign to all `children`
/// (i.e., it is not local to *any* of them).
/// This can be calculated as the max of the parent's `exposed_as()`, `max_foreign_access`
/// and of all `max_local_access()` of any nodes with the same parent that are
/// not listed in `children`.
///
/// This access level changed from `old` to `new`, which is why we need to
/// update `children`.
fn push_relevant_children(
stack: &mut Vec<(UniIndex, WildcardAccessLevel)>,
new_foreign_access: WildcardAccessLevel,
old_foreign_access: WildcardAccessLevel,
child_reads: u16,
child_writes: u16,
children: impl Iterator<Item = UniIndex>,
wildcard_accesses: &UniValMap<WildcardState>,
) {
use WildcardAccessLevel::*;
// Nothing changed so we don't need to update anything.
if new_foreign_access == old_foreign_access {
return;
}
// We need to consider that the children's `max_local_access()` affect each
// other's `max_foreign_access`, but do not affect their own `max_foreign_access`.
// The new `max_foreign_acces` for children with `max_local_access()==Write`.
let write_foreign_access = max(
new_foreign_access,
if child_writes > 1 {
// There exists at least one more child with exposed write access.
// This means that a foreign write through that node is possible.
Write
} else if child_reads > 1 {
// There exists at least one more child with exposed read access,
// but no other with write access.
// This means that a foreign read but no write through that node
// is possible.
Read
} else {
// There are no other nodes with read or write access.
// This means no foreign writes through other children are possible.
None
},
);
// The new `max_foreign_acces` for children with `max_local_access()==Read`.
let read_foreign_access = max(
new_foreign_access,
if child_writes > 0 {
// There exists at least one child with write access (and it's not this one).
Write
} else if child_reads > 1 {
// There exists at least one more child with exposed read access,
// but no other with write access.
Read
} else {
// There are no other nodes with read or write access,
None
},
);
// The new `max_foreign_acces` for children with `max_local_access()==None`.
let none_foreign_access = max(
new_foreign_access,
if child_writes > 0 {
// There exists at least one child with write access (and it's not this one).
Write
} else if child_reads > 0 {
// There exists at least one child with read access (and it's not this one),
// but none with write access.
Read
} else {
// No children are exposed as read or write.
None
},
);
stack.extend(children.filter_map(|child| {
let state = wildcard_accesses.get(child).cloned().unwrap_or_default();
let new_foreign_access = match state.max_local_access() {
Write => write_foreign_access,
Read => read_foreign_access,
None => none_foreign_access,
};
if new_foreign_access != state.max_foreign_access {
Some((child, new_foreign_access))
} else {
Option::None
}
}));
}
/// Update the tracking information of a tree, to reflect that the node specified by `id` is
/// now exposed with `new_exposed_as`.
/// now exposed with `new_exposed_as` permission.
///
/// Propagates the wildcard access information over the tree. This needs to be called every
/// time the access level of an exposed node changes, to keep the state in sync with
/// the rest of the tree.
///
/// * `from`: The previous access level of the exposed node.
/// Set to `None` if the node was not exposed before.
/// * `to`: The new access level.
pub fn update_exposure(
id: UniIndex,
new_exposed_as: WildcardAccessLevel,
&mut self,
nodes: &UniValMap<Node>,
wildcard_accesses: &mut UniValMap<WildcardState>,
id: UniIndex,
from: WildcardAccessLevel,
to: WildcardAccessLevel,
) {
let mut entry = wildcard_accesses.entry(id);
let src_state = entry.or_insert(Default::default());
let old_exposed_as = src_state.exposed_as;
// If the exposure doesn't change, then we don't need to update anything.
if old_exposed_as == new_exposed_as {
if from == to {
return;
}
let src_old_local_access = src_state.max_local_access();
src_state.exposed_as = new_exposed_as;
let src_new_local_access = src_state.max_local_access();
// Stack of nodes for which the max_foreign_access field needs to be updated.
// Will be filled with the children of this node and its parents children before
// we begin downwards traversal.
let mut stack: Vec<(UniIndex, WildcardAccessLevel)> = Vec::new();
// Add the direct children of this node to the stack.
{
// Update the counts of this node and all its ancestors.
let mut next_id = Some(id);
while let Some(id) = next_id {
let node = nodes.get(id).unwrap();
Self::push_relevant_children(
&mut stack,
// new_foreign_access
max(src_state.max_foreign_access, new_exposed_as),
// old_foreign_access
max(src_state.max_foreign_access, old_exposed_as),
// Consider all children.
src_state.child_reads,
src_state.child_writes,
node.children.iter().copied(),
wildcard_accesses,
);
}
// We need to propagate the tracking info up the tree, for this we traverse
// up the parents.
// We can skip propagating info to the parent and siblings of a node if its
// access didn't change.
{
// The child from which we came.
let mut child = id;
// This is the `max_local_access()` of the child we came from, before
// this update...
let mut old_child_access = src_old_local_access;
// and after this update.
let mut new_child_access = src_new_local_access;
while let Some(parent_id) = nodes.get(child).unwrap().parent {
let parent_node = nodes.get(parent_id).unwrap();
let mut entry = wildcard_accesses.entry(parent_id);
let parent_state = entry.or_insert(Default::default());
let mut state = self.0.entry(id);
let state = state.or_insert(Default::default());
let old_parent_local_access = parent_state.max_local_access();
use WildcardAccessLevel::*;
// Updating this node's tracking state for its children.
match (old_child_access, new_child_access) {
(None | Read, Write) => parent_state.child_writes += 1,
(Write, None | Read) => parent_state.child_writes -= 1,
_ => {}
}
match (old_child_access, new_child_access) {
(None, Read | Write) => parent_state.child_reads += 1,
(Read | Write, None) => parent_state.child_reads -= 1,
_ => {}
}
let new_parent_local_access = parent_state.max_local_access();
{
// We need to update the `max_foreign_access` of `child`'s
// siblings. For this we can reuse the `push_relevant_children`
// function.
//
// We pass it just the siblings without child itself. Since
// `child`'s `max_local_access()` is foreign to all of its
// siblings we can pass it as part of the foreign access.
let parent_access =
max(parent_state.exposed_as, parent_state.max_foreign_access);
// This is how many of `child`'s siblings have read/write local access.
// If `child` itself has access, then we need to subtract its access from the count.
let sibling_reads =
parent_state.child_reads - if new_child_access >= Read { 1 } else { 0 };
let sibling_writes =
parent_state.child_writes - if new_child_access >= Write { 1 } else { 0 };
Self::push_relevant_children(
&mut stack,
// new_foreign_access
max(parent_access, new_child_access),
// old_foreign_access
max(parent_access, old_child_access),
// Consider only siblings of child.
sibling_reads,
sibling_writes,
parent_node.children.iter().copied().filter(|id| child != *id),
wildcard_accesses,
);
}
if old_parent_local_access == new_parent_local_access {
// We didn't change `max_local_access()` for parent, so we don't need to propagate further upwards.
break;
}
old_child_access = old_parent_local_access;
new_child_access = new_parent_local_access;
child = parent_id;
use WildcardAccessLevel::*;
match (from, to) {
(None | Read, Write) => state.local_writes += 1,
(Write, None | Read) => state.local_writes -= 1,
_ => {}
}
match (from, to) {
(None, Read | Write) => state.local_reads += 1,
(Read | Write, None) => state.local_reads -= 1,
_ => {}
}
next_id = node.parent;
}
// Traverses down the tree to update max_foreign_access fields of children and cousins who need to be updated.
while let Some((id, new_access)) = stack.pop() {
let node = nodes.get(id).unwrap();
let mut entry = wildcard_accesses.entry(id);
let state = entry.or_insert(Default::default());
let old_access = state.max_foreign_access;
state.max_foreign_access = new_access;
Self::push_relevant_children(
&mut stack,
// new_foreign_access
max(state.exposed_as, new_access),
// old_foreign_access
max(state.exposed_as, old_access),
// Consider all children.
state.child_reads,
state.child_writes,
node.children.iter().copied(),
wildcard_accesses,
);
}
}
/// Removes a node from the datastructure.
///
/// The caller needs to ensure that the node does not have any children.
pub fn remove(&mut self, idx: UniIndex) {
self.0.remove(idx);
}
}
@ -428,25 +195,28 @@ impl Tree {
pub fn expose_tag(&mut self, tag: BorTag, protected: bool) {
let id = self.tag_mapping.get(&tag).unwrap();
let node = self.nodes.get_mut(id).unwrap();
node.is_exposed = true;
let node = self.nodes.get(id).unwrap();
if !node.is_exposed {
node.is_exposed = true;
let node = self.nodes.get(id).unwrap();
// When the first tag gets exposed then we initialize the
// wildcard state for every node and location in the tree.
for (_, loc) in self.locations.iter_mut_all() {
let perm = loc
.perms
.get(id)
.map(|p| p.permission())
.unwrap_or_else(|| node.default_location_state().permission());
for (_, loc) in self.locations.iter_mut_all() {
let perm = loc
.perms
.get(id)
.map(|p| p.permission())
.unwrap_or_else(|| node.default_location_state().permission());
let access_type = perm.strongest_allowed_local_access(protected);
WildcardState::update_exposure(
id,
access_type,
&self.nodes,
&mut loc.wildcard_accesses,
);
let access_level = perm.strongest_allowed_local_access(protected);
// An unexposed node gets treated as access level `None`. Therefore,
// the initial exposure transitions from `None` to the node's actual
// `access_level`.
loc.exposed_cache.update_exposure(
&self.nodes,
id,
WildcardAccessLevel::None,
access_level,
);
}
}
}
@ -457,10 +227,19 @@ impl Tree {
// We check if the node is already exposed, as we don't want to expose any
// nodes which aren't already exposed.
if self.nodes.get(idx).unwrap().is_exposed {
// Updates the exposure to the new permission on every location.
self.expose_tag(tag, /* protected */ false);
let node = self.nodes.get(idx).unwrap();
if node.is_exposed {
for (_, loc) in self.locations.iter_mut_all() {
let perm = loc
.perms
.get(idx)
.map(|p| p.permission())
.unwrap_or_else(|| node.default_location_state().permission());
// We are transitioning from protected to unprotected.
let old_access_type = perm.strongest_allowed_local_access(/*protected*/ true);
let access_type = perm.strongest_allowed_local_access(/*protected*/ false);
loc.exposed_cache.update_exposure(&self.nodes, idx, old_access_type, access_type);
}
}
}
}
@ -472,20 +251,15 @@ impl Tree {
pub fn verify_wildcard_consistency(&self, global: &GlobalState) {
// We rely on the fact that `roots` is ordered according to tag from low to high.
assert!(self.roots.is_sorted_by_key(|idx| self.nodes.get(*idx).unwrap().tag));
let main_root_idx = self.roots[0];
let protected_tags = &global.borrow().protected_tags;
for (_, loc) in self.locations.iter_all() {
let wildcard_accesses = &loc.wildcard_accesses;
let exposed_cache = &loc.exposed_cache;
let perms = &loc.perms;
// Checks if accesses is empty.
if wildcard_accesses.is_empty() {
return;
}
for (id, node) in self.nodes.iter() {
let state = wildcard_accesses.get(id).unwrap();
let state = exposed_cache.0.get(id).cloned().unwrap_or_default();
let expected_exposed_as = if node.is_exposed {
let exposed_as = if node.is_exposed {
let perm =
perms.get(id).copied().unwrap_or_else(|| node.default_location_state());
@ -495,72 +269,25 @@ impl Tree {
WildcardAccessLevel::None
};
// The foreign wildcard accesses possible at a node are determined by which
// accesses can originate from their siblings, their parent, and from above
// their parent.
let expected_max_foreign_access = if let Some(parent) = node.parent {
let parent_node = self.nodes.get(parent).unwrap();
let parent_state = wildcard_accesses.get(parent).unwrap();
let max_sibling_access = parent_node
.children
.iter()
.copied()
.filter(|child| *child != id)
.map(|child| {
let state = wildcard_accesses.get(child).unwrap();
state.max_local_access()
})
.fold(WildcardAccessLevel::None, max);
max_sibling_access
.max(parent_state.max_foreign_access)
.max(parent_state.exposed_as)
} else {
if main_root_idx == id {
// There can never be a foreign access to the root of the allocation.
// So its foreign access level is always `None`.
WildcardAccessLevel::None
} else {
// For wildcard roots any access on a different subtree can be foreign
// to it. So a wildcard root has the maximum possible foreign access
// level.
WildcardAccessLevel::Write
}
};
// Count how many children can be the source of wildcard reads or writes
// (either directly, or via their children).
let child_accesses = node.children.iter().copied().map(|child| {
let state = wildcard_accesses.get(child).unwrap();
state.max_local_access()
});
let expected_child_reads =
child_accesses.clone().filter(|a| *a >= WildcardAccessLevel::Read).count();
let expected_child_writes =
child_accesses.filter(|a| *a >= WildcardAccessLevel::Write).count();
let (child_reads, child_writes) = node
.children
.iter()
.copied()
.map(|id| exposed_cache.0.get(id).cloned().unwrap_or_default())
.fold((0, 0), |acc, wc| (acc.0 + wc.local_reads, acc.1 + wc.local_writes));
let expected_reads =
child_reads + u16::from(exposed_as >= WildcardAccessLevel::Read);
let expected_writes =
child_writes + u16::from(exposed_as >= WildcardAccessLevel::Write);
assert_eq!(
expected_exposed_as, state.exposed_as,
"tag {:?} (id:{id:?}) should be exposed as {expected_exposed_as:?} but is exposed as {:?}",
node.tag, state.exposed_as
state.local_reads, expected_reads,
"expected {:?}'s (id:{id:?}) local_reads to be {expected_reads:?} instead of {:?} (child_reads: {child_reads:?}, exposed_as: {exposed_as:?})",
node.tag, state.local_reads
);
assert_eq!(
expected_max_foreign_access, state.max_foreign_access,
"expected {:?}'s (id:{id:?}) max_foreign_access to be {:?} instead of {:?}",
node.tag, expected_max_foreign_access, state.max_foreign_access
);
let child_reads: usize = state.child_reads.into();
assert_eq!(
expected_child_reads, child_reads,
"expected {:?}'s (id:{id:?}) child_reads to be {} instead of {}",
node.tag, expected_child_reads, child_reads
);
let child_writes: usize = state.child_writes.into();
assert_eq!(
expected_child_writes, child_writes,
"expected {:?}'s (id:{id:?}) child_writes to be {} instead of {}",
node.tag, expected_child_writes, child_writes
state.local_writes, expected_writes,
"expected {:?}'s (id:{id:?}) local_writes to be {expected_writes:?} instead of {:?} (child_writes: {child_writes:?}, exposed_as: {exposed_as:?})",
node.tag, state.local_writes
);
}
}

View file

@ -371,7 +371,7 @@ impl AccessType {
if let Some(size) = size {
if size == Size::ZERO {
// In this case there were multiple read accesss with different sizes and then a write.
// In this case there were multiple read accesses with different sizes and then a write.
// We will be reporting *one* of the other reads, but we don't have enough information
// to determine which one had which size.
assert!(self == AccessType::AtomicLoad);

View file

@ -62,7 +62,7 @@ impl GlobalStateInner {
let entry = match self.base_addr.entry(alloc_id) {
Entry::Occupied(occupied_entry) => {
// Looks like some other thread allocated this for us
// between when we released the read lock and aquired the write lock,
// between when we released the read lock and acquired the write lock,
// so we just return that value.
return interp_ok(*occupied_entry.get());
}

View file

@ -252,7 +252,7 @@ impl GenmcCtx {
/// Inform GenMC about an atomic load.
/// Returns that value that the load should read.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitialized.
pub(crate) fn atomic_load<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
@ -275,7 +275,7 @@ impl GenmcCtx {
/// Inform GenMC about an atomic store.
/// Returns `true` if the stored value should be reflected in Miri's memory.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitialized.
pub(crate) fn atomic_store<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
@ -320,7 +320,7 @@ impl GenmcCtx {
///
/// Returns `(old_val, Option<new_val>)`. `new_val` might not be the latest write in coherence order, which is indicated by `None`.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitialized.
pub(crate) fn atomic_rmw_op<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
@ -345,7 +345,7 @@ impl GenmcCtx {
/// Returns `(old_val, Option<new_val>)`. `new_val` might not be the latest write in coherence order, which is indicated by `None`.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitialized.
pub(crate) fn atomic_exchange<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
@ -370,7 +370,7 @@ impl GenmcCtx {
///
/// Returns the old value read by the compare exchange, optionally the value that Miri should write back to its memory, and whether the compare-exchange was a success or not.
///
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitalized.
/// `old_value` is the value that a non-atomic load would read here, or `None` if the memory is uninitialized.
pub(crate) fn atomic_compare_exchange<'tcx>(
&self,
ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,

View file

@ -30,7 +30,7 @@ pub fn run_genmc_mode<'tcx>(
config: &MiriConfig,
eval_entry: impl Fn(Rc<GenmcCtx>) -> Result<(), NonZeroI32>,
) -> Result<(), NonZeroI32> {
// Check for supported target: endianess and pointer size must match the host.
// Check for supported target: endianness and pointer size must match the host.
if tcx.data_layout.endian != Endian::Little || tcx.data_layout.pointer_size().bits() != 64 {
tcx.dcx().fatal("GenMC only supports 64bit little-endian targets");
}

View file

@ -389,7 +389,7 @@ impl<'tcx> StoreBuffer {
})
.filter(|&store_elem| {
if is_seqcst && store_elem.is_seqcst {
// An SC load needs to ignore all but last store maked SC (stores not marked SC are not
// An SC load needs to ignore all but last store marked SC (stores not marked SC are not
// affected)
let include = !found_sc;
found_sc = true;

View file

@ -113,7 +113,7 @@ pub struct MiriConfig {
pub float_nondet: bool,
/// Whether floating-point operations can have a non-deterministic rounding error.
pub float_rounding_error: FloatRoundingErrorMode,
/// Whether Miri artifically introduces short reads/writes on file descriptors.
/// Whether Miri artificially introduces short reads/writes on file descriptors.
pub short_fd_operations: bool,
/// A list of crates that are considered user-relevant.
pub user_relevant_crates: Vec<String>,

View file

@ -138,7 +138,7 @@ pub fn iter_exported_symbols<'tcx>(
}
// Next, all our dependencies.
// `dependency_formats` includes all the transitive informations needed to link a crate,
// `dependency_formats` includes all the transitive information needed to link a crate,
// which is what we need here since we need to dig out `exported_symbols` from all transitive
// dependencies.
let dependency_formats = tcx.dependency_formats(());
@ -1148,7 +1148,7 @@ impl ToUsize for u32 {
}
/// Similarly, a maximum address size of `u64` is assumed widely here, so let's have ergonomic
/// converion from `usize` to `u64`.
/// conversion from `usize` to `u64`.
pub trait ToU64 {
fn to_u64(self) -> u64;
}

View file

@ -654,7 +654,7 @@ pub struct MiriMachine<'tcx> {
/// Whether floating-point operations can have a non-deterministic rounding error.
pub float_rounding_error: FloatRoundingErrorMode,
/// Whether Miri artifically introduces short reads/writes on file descriptors.
/// Whether Miri artificially introduces short reads/writes on file descriptors.
pub short_fd_operations: bool,
}
@ -1802,7 +1802,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
// We have to skip the frame that is just being popped.
ecx.active_thread_mut().recompute_top_user_relevant_frame(/* skip */ 1);
}
// tracing-tree can autoamtically annotate scope changes, but it gets very confused by our
// tracing-tree can automatically annotate scope changes, but it gets very confused by our
// concurrency and what it prints is just plain wrong. So we print our own information
// instead. (Cc https://github.com/rust-lang/miri/issues/2266)
info!("Leaving {}", ecx.frame().instance());

View file

@ -58,7 +58,33 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.write_immediate(*res_lane, &dest)?;
}
}
// Vector table lookup: each index selects a byte from the 16-byte table, out-of-range -> 0.
// Used to implement vtbl1_u8 function.
// LLVM does not have a portable shuffle that takes non-const indices
// so we need to implement this ourselves.
// https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8
"neon.tbl1.v16i8" => {
let [table, indices] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (table, table_len) = this.project_to_simd(table)?;
let (indices, idx_len) = this.project_to_simd(indices)?;
let (dest, dest_len) = this.project_to_simd(dest)?;
assert_eq!(table_len, 16);
assert_eq!(idx_len, dest_len);
for i in 0..dest_len {
let idx = this.read_immediate(&this.project_index(&indices, i)?)?;
let idx_u = idx.to_scalar().to_u8()?;
let val = if u64::from(idx_u) < table_len {
let t = this.read_immediate(&this.project_index(&table, idx_u.into())?)?;
t.to_scalar()
} else {
Scalar::from_u8(0)
};
this.write_scalar(val, &this.project_index(&dest, i)?)?;
}
}
_ => return interp_ok(EmulateItemResult::NotSupported),
}
interp_ok(EmulateItemResult::NeedsReturn)

View file

@ -31,7 +31,7 @@ pub struct Supervisor {
/// Used for synchronisation, allowing us to receive confirmation that the
/// parent process has handled the request from `message_tx`.
confirm_rx: ipc::IpcReceiver<Confirmation>,
/// Receiver for memory acceses that ocurred during the FFI call.
/// Receiver for memory accesses that occurred during the FFI call.
event_rx: ipc::IpcReceiver<MemEvents>,
}

View file

@ -395,8 +395,6 @@ fn capstone_find_events(
_ => (),
}
}
// FIXME: arm64
_ => unimplemented!(),
}
false

View file

@ -316,7 +316,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// The new path is still absolute on Windows.
path.remove(0);
}
// If this starts withs a `\` but not a `\\`, then this was absolute on Unix but is
// If this starts with a `\` but not a `\\`, then this was absolute on Unix but is
// relative on Windows (relative to "the root of the current directory", e.g. the
// drive letter).
else if path.first() == Some(&sep) && path.get(1) != Some(&sep) {

View file

@ -401,11 +401,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
};
let timeout_anchor = if flags == 0 {
// No flags set, the timespec should be interperted as a duration
// No flags set, the timespec should be interpreted as a duration
// to sleep for
TimeoutAnchor::Relative
} else if flags == this.eval_libc_i32("TIMER_ABSTIME") {
// Only flag TIMER_ABSTIME set, the timespec should be interperted as
// Only flag TIMER_ABSTIME set, the timespec should be interpreted as
// an absolute time.
TimeoutAnchor::Absolute
} else {

View file

@ -1045,7 +1045,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
&[Os::Linux, Os::FreeBsd, Os::Illumos, Os::Solaris, Os::Android, Os::MacOs],
link_name,
)?;
// This function looks and behaves excatly like miri_start_unwind.
// This function looks and behaves exactly like miri_start_unwind.
let [payload] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
this.handle_miri_start_unwind(payload)?;
return interp_ok(EmulateItemResult::NeedsUnwind);

View file

@ -169,7 +169,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let Some(futex_ref) =
this.get_sync_or_init(obj, |_| FreeBsdFutex { futex: Default::default() })
else {
// From Linux implemenation:
// From Linux implementation:
// No AllocId, or no live allocation at that AllocId.
// Return an error code. (That seems nicer than silently doing something non-intuitive.)
// This means that if an address gets reused by a new allocation,

View file

@ -165,7 +165,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
)? {
ThreadNameResult::Ok => Scalar::from_u32(0),
ThreadNameResult::NameTooLong => this.eval_libc("ERANGE"),
// Act like we faild to open `/proc/self/task/$tid/comm`.
// Act like we failed to open `/proc/self/task/$tid/comm`.
ThreadNameResult::ThreadNotFound => this.eval_libc("ENOENT"),
};
this.write_scalar(res, dest)?;
@ -186,7 +186,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
)? {
ThreadNameResult::Ok => Scalar::from_u32(0),
ThreadNameResult::NameTooLong => unreachable!(),
// Act like we faild to open `/proc/self/task/$tid/comm`.
// Act like we failed to open `/proc/self/task/$tid/comm`.
ThreadNameResult::ThreadNotFound => this.eval_libc("ENOENT"),
}
} else {

View file

@ -176,7 +176,7 @@ impl EpollInterestTable {
if let Some(epolls) = self.0.remove(&id) {
for epoll in epolls.iter().filter_map(|(_id, epoll)| epoll.upgrade()) {
// This is a still-live epoll with interest in this FD. Remove all
// relevent interests (including from the ready set).
// relevant interests (including from the ready set).
epoll
.interest_list
.borrow_mut()

View file

@ -169,7 +169,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let is_shared = flags == shared;
let timeout = clock_timeout.map(|(_, anchor, timeout)| {
// The only clock that is currenlty supported is the monotonic clock.
// The only clock that is currently supported is the monotonic clock.
// While the deadline argument of `os_sync_wait_on_address_with_deadline`
// is actually not in nanoseconds but in the units of `mach_current_time`,
// the two are equivalent in miri.

View file

@ -1199,7 +1199,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"`_Unwind_RaiseException` is not supported on non-MinGW Windows",
);
}
// This function looks and behaves excatly like miri_start_unwind.
// This function looks and behaves exactly like miri_start_unwind.
let [payload] = this.check_shim_sig(
shim_sig!(extern "C" fn(*mut _) -> unwind::libunwind::_Unwind_Reason_Code),
link_name,

View file

@ -67,7 +67,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
)
}
/// Returns `true` if we were succssful, `false` if we would block.
/// Returns `true` if we were successful, `false` if we would block.
fn init_once_try_begin(
&mut self,
init_once_ref: &InitOnceRef,

View file

@ -194,7 +194,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Used to implement the _mm256_sign_epi{8,16,32} functions.
// Negates elements from `left` when the corresponding element in
// `right` is negative. If an element from `right` is zero, zero
// is writen to the corresponding output element.
// is written to the corresponding output element.
// Basically, we multiply `left` with `right.signum()`.
"psign.b" | "psign.w" | "psign.d" => {
let [left, right] =

View file

@ -44,7 +44,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let right = if is_64_bit { right.to_u64()? } else { u64::from(right.to_u32()?) };
let result = match unprefixed_name {
// Extract a contigous range of bits from an unsigned integer.
// Extract a contiguous range of bits from an unsigned integer.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_bextr_u32
"bextr" => {
let start = u32::try_from(right & 0xff).unwrap();

View file

@ -24,8 +24,8 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Prefix should have already been checked.
let unprefixed_name = link_name.as_str().strip_prefix("llvm.x86.sse.").unwrap();
// All these intrinsics operate on 128-bit (f32x4) SIMD vectors unless stated otherwise.
// Many intrinsic names are sufixed with "ps" (packed single) or "ss" (scalar single),
// where single means single precision floating point (f32). "ps" means thet the operation
// Many intrinsic names are suffixed with "ps" (packed single) or "ss" (scalar single),
// where single means single precision floating point (f32). "ps" means that the operation
// is performed on each element of the vector, while "ss" means that the operation is
// performed only on the first element, copying the remaining elements from the input
// vector (for binary operations, from the left-hand side).

View file

@ -26,14 +26,14 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// These intrinsics operate on 128-bit (f32x4, f64x2, i8x16, i16x8, i32x4, i64x2) SIMD
// vectors unless stated otherwise.
// Many intrinsic names are sufixed with "ps" (packed single), "ss" (scalar signle),
// Many intrinsic names are suffixed with "ps" (packed single), "ss" (scalar single),
// "pd" (packed double) or "sd" (scalar double), where single means single precision
// floating point (f32) and double means double precision floating point (f64). "ps"
// and "pd" means thet the operation is performed on each element of the vector, while
// and "pd" means that the operation is performed on each element of the vector, while
// "ss" and "sd" means that the operation is performed only on the first element, copying
// the remaining elements from the input vector (for binary operations, from the left-hand
// side).
// Intrinsincs sufixed with "epiX" or "epuX" operate with X-bit signed or unsigned
// Intrinsics suffixed with "epiX" or "epuX" operate with X-bit signed or unsigned
// vectors.
match unprefixed_name {
// Used to implement the _mm_sad_epu8 function.

View file

@ -115,7 +115,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
round_all::<rustc_apfloat::ieee::Double>(this, op, rounding, dest)?;
}
// Used to implement the _mm_minpos_epu16 function.
// Find the minimum unsinged 16-bit integer in `op` and
// Find the minimum unsigned 16-bit integer in `op` and
// returns its value and position.
"phminposuw" => {
let [op] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;

View file

@ -213,7 +213,7 @@ fn deconstruct_args<'tcx>(
};
// The fourth letter of each string comparison intrinsic is either 'e' for "explicit" or 'i' for "implicit".
// The distinction will correspond to the intrinsics type signature. In this constext, "explicit" and "implicit"
// The distinction will correspond to the intrinsics type signature. In this context, "explicit" and "implicit"
// refer to the way the string length is determined. The length is either passed explicitly in the "explicit"
// case or determined by a null terminator in the "implicit" case.
let is_explicit = match unprefixed_name.as_bytes().get(4) {
@ -297,7 +297,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
deconstruct_args(unprefixed_name, this, link_name, abi, args)?;
let mask = compare_strings(this, &str1, &str2, len, imm)?;
// The sixth bit inside the immediate byte distiguishes
// The sixth bit inside the immediate byte distinguishes
// between a bit mask or a byte mask when generating a mask.
if imm & 0b100_0000 != 0 {
let (array_layout, size) = if imm & USE_WORDS != 0 {
@ -347,7 +347,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let mask = compare_strings(this, &str1, &str2, len, imm)?;
let len = default_len::<u32>(imm);
// The sixth bit inside the immediate byte distiguishes between the least
// The sixth bit inside the immediate byte distinguishes between the least
// significant bit and the most significant bit when generating an index.
let result = if imm & 0b100_0000 != 0 {
// most significant bit

View file

@ -68,7 +68,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Used to implement the _mm_sign_epi{8,16,32} functions.
// Negates elements from `left` when the corresponding element in
// `right` is negative. If an element from `right` is zero, zero
// is writen to the corresponding output element.
// is written to the corresponding output element.
// Basically, we multiply `left` with `right.signum()`.
"psign.b.128" | "psign.w.128" | "psign.d.128" => {
let [left, right] =

View file

@ -46,9 +46,9 @@ checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
[[package]]
name = "bytes"
version = "1.10.1"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
[[package]]
name = "cfg-if"

View file

@ -1 +1,3 @@
fn main() {}
fn main() {
unreachable!()
}

View file

@ -11,7 +11,6 @@ help: ALLOC was allocated here:
|
LL | let mut buf = vec![0u8; 15];
| ^^^^^^^^^^^^^
= note: this error originates in the macro `vec` (in Nightly builds, run with -Z macro-backtrace for more info)
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace

View file

@ -5,7 +5,7 @@ enum Never {}
fn main() {
unsafe {
match *std::ptr::null::<Result<Never, Never>>() {
//~^ ERROR: read discriminant of an uninhabited enum variant
//~^ ERROR: read discriminant of an uninhabited enum variant
Ok(_) => {
lol();
}

View file

@ -22,7 +22,8 @@ fn main() {
// After rust-lang/rust#138961, constructing the closure performs a reborrow of r.
// Nevertheless, the discriminant is only actually inspected when the closure
// is called.
match r { //~ ERROR: read discriminant of an uninhabited enum variant
match r {
//~^ ERROR: read discriminant of an uninhabited enum variant
E::V0 => {}
E::V1(_) => {}
}

View file

@ -4,8 +4,8 @@
#[repr(C)]
#[allow(dead_code)]
enum E {
V0, // discriminant: 0
V1(!), // 1
V0, // discriminant: 0
V1(!), // 1
}
fn main() {
@ -14,7 +14,8 @@ fn main() {
let val = 1u32;
let ptr = (&raw const val).cast::<E>();
let r = unsafe { &*ptr };
match r { //~ ERROR: read discriminant of an uninhabited enum variant
match r {
//~^ ERROR: read discriminant of an uninhabited enum variant
E::V0 => {}
E::V1(_) => {}
}

View file

@ -20,12 +20,13 @@ fn main() {
let x: &[u8; 2] = &[21, 37];
let y: &Exhaustive = std::mem::transmute(x);
match y {
Exhaustive::A(_) => {},
Exhaustive::A(_) => {}
}
let y: &NonExhaustive = std::mem::transmute(x);
match y { //~ ERROR: enum value has invalid tag
NonExhaustive::A(_) => {},
match y {
//~^ ERROR: enum value has invalid tag
NonExhaustive::A(_) => {}
}
}
}

View file

@ -28,7 +28,8 @@ fn main() {
_ => {}
}
match *nexh { //~ ERROR: memory is uninitialized
match *nexh {
//~^ ERROR: memory is uninitialized
NonExhaustive::A(ref _val) => {}
_ => {}
}

View file

@ -0,0 +1,17 @@
// NOTE: this is essentially a smoke-test, with more comprehensive tests living in the rustc
// repository at tests/ui/consts/const-eval/ub-enum.rs
#![feature(never_type)]
#[repr(C)]
#[allow(dead_code)]
enum E {
V1, // discriminant: 0
V2(!), // 1
}
fn main() {
unsafe {
std::mem::transmute::<u32, E>(1);
//~^ ERROR: encountered an uninhabited enum variant
}
}

View file

@ -0,0 +1,13 @@
error: Undefined Behavior: constructing invalid value at .<enum-tag>: encountered an uninhabited enum variant
--> tests/fail/validity/uninhabited_variant.rs:LL:CC
|
LL | std::mem::transmute::<u32, E>(1);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View file

@ -1,5 +1,4 @@
#![allow(unused)]
#[allow(unused)]
#[repr(u16)]
enum DeviceKind {
Nil = 0,
@ -19,5 +18,5 @@ fn main() {
let x = None::<(DeviceInfo, u8)>;
let y = None::<(DeviceInfo, u16)>;
let z = None::<(DeviceInfo, u64)>;
format!("{} {} {}", x.is_some(), y.is_some(), y.is_some());
let _out = format!("{} {} {}", x.is_some(), y.is_some(), z.is_some());
}

View file

@ -1,5 +1,4 @@
#![feature(async_fn_traits)]
#![allow(unused)]
use std::future::Future;
use std::ops::{AsyncFn, AsyncFnMut, AsyncFnOnce};

View file

@ -4,17 +4,19 @@
use std::arch::aarch64::*;
use std::arch::is_aarch64_feature_detected;
use std::mem::transmute;
fn main() {
assert!(is_aarch64_feature_detected!("neon"));
unsafe {
test_neon();
test_vpmaxq_u8();
test_tbl1_v16i8_basic();
}
}
#[target_feature(enable = "neon")]
unsafe fn test_neon() {
unsafe fn test_vpmaxq_u8() {
// Adapted from library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
unsafe fn test_vpmaxq_u8() {
let a = vld1q_u8([1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8].as_ptr());
@ -38,3 +40,28 @@ unsafe fn test_neon() {
}
test_vpmaxq_u8_is_unsigned();
}
#[target_feature(enable = "neon")]
fn test_tbl1_v16i8_basic() {
    unsafe {
        // An identity table: entry i holds the value i, so a lookup of index i
        // must return i for every in-range index.
        let identity = [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
        let table: uint8x16_t = transmute::<[u8; 16], _>(identity);
        let idx: uint8x16_t = transmute::<[u8; 16], _>(identity);
        let got: [u8; 16] = transmute(vqtbl1q_u8(table, idx));
        assert_eq!(got, identity);

        // Now shuffle the order and include out-of-range indices (16, 255):
        // vqtbl1q_u8 yields 0 for any index >= 16.
        let idx2: uint8x16_t =
            transmute::<[u8; 16], _>([15, 16, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
        let got2: [u8; 16] = transmute(vqtbl1q_u8(table, idx2));
        assert_eq!(got2[0], 15);
        // Lanes 1 and 2 used out-of-range indices, so they must be zeroed.
        assert_eq!(got2[1], 0);
        assert_eq!(got2[2], 0);
        // The remaining lanes looked up 0..=12 in order.
        assert_eq!(&got2[3..16], &identity[..13]);
    }
}

View file

@ -132,14 +132,19 @@ fn miri_config(
// (It's a separate crate, so we don't get an env var from cargo.)
program: miri_path()
.with_file_name(format!("cargo-miri{}", env::consts::EXE_SUFFIX)),
// There is no `cargo miri build` so we just use `cargo miri run`.
// Add `-Zbinary-dep-depinfo` since it is needed for bootstrap builds (and doesn't harm otherwise).
args: ["miri", "run", "--quiet", "-Zbinary-dep-depinfo"]
args: ["miri", "build", "-Zbinary-dep-depinfo"]
.into_iter()
.map(Into::into)
.collect(),
// Reset `RUSTFLAGS` to work around <https://github.com/rust-lang/rust/pull/119574#issuecomment-1876878344>.
envs: vec![("RUSTFLAGS".into(), None)],
envs: vec![
// Reset `RUSTFLAGS` to work around <https://github.com/rust-lang/rust/pull/119574#issuecomment-1876878344>.
("RUSTFLAGS".into(), None),
// Reset `MIRIFLAGS` because it caused trouble in the past and should not be needed.
("MIRIFLAGS".into(), None),
// Allow `cargo miri build`.
("MIRI_BUILD_TEST_DEPS".into(), Some("1".into())),
],
..CommandBuilder::cargo()
},
crate_manifest_path: Path::new("tests/deps").join("Cargo.toml"),
@ -361,15 +366,16 @@ fn run_dep_mode(target: String, args: impl Iterator<Item = OsString>) -> Result<
miri_config(&target, "", Mode::RunDep, Some(WithDependencies { bless: false }));
config.comment_defaults.base().custom.remove("edition"); // `./miri` adds an `--edition` in `args`, so don't set it twice
config.fill_host_and_target()?;
let dep_builder = BuildManager::one_off(config.clone());
// Only set these for the actual run, not the dep builder, so invalid flags do not fail
// the dependency build.
config.program.args = args.collect();
let test_config = TestConfig::one_off_runner(config, PathBuf::new());
let test_config = TestConfig::one_off_runner(config.clone(), PathBuf::new());
let build_manager = BuildManager::one_off(config);
let mut cmd = test_config.config.program.build(&test_config.config.out_dir);
cmd.arg("--target").arg(test_config.config.target.as_ref().unwrap());
// Build dependencies
test_config.apply_custom(&mut cmd, &build_manager).unwrap();
test_config.apply_custom(&mut cmd, &dep_builder).expect("failed to build dependencies");
if cmd.spawn()?.wait()?.success() { Ok(()) } else { std::process::exit(1) }
}