Auto merge of #2843 - jsoref:spelling, r=RalfJung

Spelling

This PR corrects misspellings identified by the [check-spelling action](https://github.com/marketplace/actions/check-spelling).

The misspellings have been reported at https://github.com/jsoref/miri/actions/runs/4699927237#summary-12751183650

The action reports that the changes in this PR would make it happy: https://github.com/jsoref/miri/actions/runs/4699927572#summary-12751184493
This commit is contained in:
bors 2023-04-14 18:44:15 +00:00
commit cd53a43a86
50 changed files with 77 additions and 77 deletions

View file

@ -403,7 +403,7 @@ to Miri failing to detect cases of undefined behavior in a program.
* `-Zmiri-retag-fields=<all|none|scalar>` controls when Stacked Borrows retagging recurses into
fields. `all` means it always recurses (like `-Zmiri-retag-fields`), `none` means it never
recurses, `scalar` (the default) means it only recurses for types where we would also emit
`noalias` annotations in the generated LLVM IR (types passed as indivudal scalars or pairs of
`noalias` annotations in the generated LLVM IR (types passed as individual scalars or pairs of
scalars). Setting this to `none` is **unsound**.
* `-Zmiri-tag-gc=<blocks>` configures how often the pointer tag garbage collector runs. The default
is to search for and remove unreachable tags once every `10000` basic blocks. Setting this to

View file

@ -81,7 +81,7 @@ fn main() {
"miri" => phase_cargo_miri(args),
"runner" => phase_runner(args, RunnerPhase::Cargo),
arg if arg == env::var("RUSTC").unwrap() => {
// If the first arg is equal to the RUSTC env ariable (which should be set at this
// If the first arg is equal to the RUSTC env variable (which should be set at this
// point), then we need to behave as rustc. This is the somewhat counter-intuitive
// behavior of having both RUSTC and RUSTC_WRAPPER set
// (see https://github.com/rust-lang/cargo/issues/10886).

View file

@ -120,7 +120,7 @@ impl rustc_driver::Callbacks for MiriBeRustCompilerCalls {
#[allow(rustc::potential_query_instability)] // rustc_codegen_ssa (where this code is copied from) also allows this lint
fn config(&mut self, config: &mut Config) {
if config.opts.prints.is_empty() && self.target_crate {
// Queries overriden here affect the data stored in `rmeta` files of dependencies,
// Queries overridden here affect the data stored in `rmeta` files of dependencies,
// which will be used later in non-`MIRI_BE_RUSTC` mode.
config.override_queries = Some(|_, local_providers, _| {
// `exported_symbols` and `reachable_non_generics` provided by rustc always returns

View file

@ -238,7 +238,7 @@ pub enum BorrowTrackerMethod {
}
impl BorrowTrackerMethod {
pub fn instanciate_global_state(self, config: &MiriConfig) -> GlobalState {
pub fn instantiate_global_state(self, config: &MiriConfig) -> GlobalState {
RefCell::new(GlobalStateInner::new(
self,
config.tracked_pointer_tags.clone(),

View file

@ -292,7 +292,7 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
.rev()
.find_map(|event| {
// First, look for a Creation event where the tag and the offset matches. This
// ensrues that we pick the right Creation event when a retag isn't uniform due to
// ensures that we pick the right Creation event when a retag isn't uniform due to
// Freeze.
let range = event.retag.range;
if event.retag.new_tag == tag

View file

@ -433,7 +433,7 @@ impl<'tcx> Stack {
let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from) else {
// The parent is a wildcard pointer or matched the unknown bottom.
// This is approximate. Nobody knows what happened, so forget everything.
// The new thing is SRW anyway, so we cannot push it "on top of the unkown part"
// The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
// (for all we know, it might join an SRW group inside the unknown).
trace!("reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown");
self.set_unknown_bottom(global.next_ptr_tag);
@ -825,7 +825,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
Ok(Some(alloc_id))
}
/// Retags an indidual pointer, returning the retagged version.
/// Retags an individual pointer, returning the retagged version.
/// `kind` indicates what kind of reference is being created.
fn sb_retag_reference(
&mut self,

View file

@ -51,7 +51,7 @@ impl Stack {
// Note that the algorithm below is based on considering the tag at read_idx - 1,
// so precisely considering the tag at index 0 for removal when we have an unknown
// bottom would complicate the implementation. The simplification of not considering
// it does not have a significant impact on the degree to which the GC mititages
// it does not have a significant impact on the degree to which the GC mitigates
// memory growth.
let mut read_idx = 1;
let mut write_idx = read_idx;

View file

@ -283,7 +283,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
Ok(Some((alloc_id, new_tag)))
}
/// Retags an indidual pointer, returning the retagged version.
/// Retags an individual pointer, returning the retagged version.
fn tb_retag_reference(
&mut self,
val: &ImmTy<'tcx, Provenance>,

View file

@ -113,7 +113,7 @@ mod transition {
}
impl PermissionPriv {
/// Determines whether a transition that occured is compatible with the presence
/// Determines whether a transition that occurred is compatible with the presence
/// of a Protector. This is not included in the `transition` functions because
/// it would distract from the few places where the transition is modified
/// because of a protector, but not forbidden.

View file

@ -34,7 +34,7 @@ pub(super) struct LocationState {
/// Before initialization we still apply some preemptive transitions on
/// `permission` to know what to do in case it ever gets initialized,
/// but these can never cause any immediate UB. There can however be UB
/// the moment we attempt to initalize (i.e. child-access) because some
/// the moment we attempt to initialize (i.e. child-access) because some
/// foreign access done between the creation and the initialization is
/// incompatible with child accesses.
initialized: bool,

View file

@ -1199,7 +1199,7 @@ pub struct GlobalState {
/// A flag to mark we are currently performing
/// a data race free action (such as atomic access)
/// to supress the race detector
/// to suppress the race detector
ongoing_action_data_race_free: Cell<bool>,
/// Mapping of a vector index to a known set of thread

View file

@ -151,7 +151,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
assert_eq!(
init_once.status,
InitOnceStatus::Uninitialized,
"begining already begun or complete init once"
"beginning already begun or complete init once"
);
init_once.status = InitOnceStatus::Begun;
}

View file

@ -25,9 +25,9 @@ pub struct RangeObjectMap<T> {
#[derive(Clone, Debug, PartialEq)]
pub enum AccessType {
/// The access perfectly overlaps (same offset and range) with the exsiting allocation
/// The access perfectly overlaps (same offset and range) with the existing allocation
PerfectlyOverlapping(Position),
/// The access does not touch any exising allocation
/// The access does not touch any existing allocation
Empty(Position),
/// The access overlaps with one or more existing allocations
ImperfectlyOverlapping(Range<Position>),
@ -115,7 +115,7 @@ impl<T> RangeObjectMap<T> {
// want to repeat the binary search on each time, so we ask the caller to supply Position
pub fn insert_at_pos(&mut self, pos: Position, range: AllocRange, data: T) {
self.v.insert(pos, Elem { range, data });
// If we aren't the first element, then our start must be greater than the preivous element's end
// If we aren't the first element, then our start must be greater than the previous element's end
if pos > 0 {
assert!(self.v[pos - 1].range.end() <= range.start);
}

View file

@ -143,7 +143,7 @@ struct Condvar {
waiters: VecDeque<CondvarWaiter>,
/// Tracks the happens-before relationship
/// between a cond-var signal and a cond-var
/// wait during a non-suprious signal event.
/// wait during a non-spurious signal event.
/// Contains the clock of the last thread to
/// perform a futex-signal.
data_race: VClock,
@ -373,7 +373,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
.expect("invariant violation: lock_count == 0 iff the thread is unlocked");
if mutex.lock_count == 0 {
mutex.owner = None;
// The mutex is completely unlocked. Try transfering ownership
// The mutex is completely unlocked. Try transferring ownership
// to another thread.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(

View file

@ -821,7 +821,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
// Write the current thread-id, switch to the next thread later
// to treat this write operation as occuring on the current thread.
// to treat this write operation as occurring on the current thread.
if let Some(thread_info_place) = thread {
this.write_scalar(
Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size),
@ -830,7 +830,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
// Finally switch to new thread so that we can push the first stackframe.
// After this all accesses will be treated as occuring in the new thread.
// After this all accesses will be treated as occurring in the new thread.
let old_thread_id = this.set_active_thread(new_thread_id);
// Perform the function pointer load in the new thread frame.

View file

@ -24,7 +24,7 @@
//! However, this model lacks SC accesses and is therefore unusable by Miri (SC accesses are everywhere in library code).
//!
//! If you find anything that proposes a relaxed memory model that is C++20-consistent, supports all orderings Rust's atomic accesses
//! and fences accept, and is implementable (with operational semanitcs), please open a GitHub issue!
//! and fences accept, and is implementable (with operational semantics), please open a GitHub issue!
//!
//! One characteristic of this implementation, in contrast to some other notable operational models such as ones proposed in
//! Taming Release-Acquire Consistency by Ori Lahav et al. (<https://plv.mpi-sws.org/sra/paper.pdf>) or Promising Semantics noted above,
@ -32,8 +32,8 @@
//! and shared across all threads. This is more memory efficient but does require store elements (representing writes to a location) to record
//! information about reads, whereas in the other two models it is the other way round: reads points to the write it got its value from.
//! Additionally, writes in our implementation do not have globally unique timestamps attached. In the other two models this timestamp is
//! used to make sure a value in a thread's view is not overwritten by a write that occured earlier than the one in the existing view.
//! In our implementation, this is detected using read information attached to store elements, as there is no data strucutre representing reads.
//! used to make sure a value in a thread's view is not overwritten by a write that occurred earlier than the one in the existing view.
//! In our implementation, this is detected using read information attached to store elements, as there is no data structure representing reads.
//!
//! The C++ memory model is built around the notion of an 'atomic object', so it would be natural
//! to attach store buffers to atomic objects. However, Rust follows LLVM in that it only has
@ -48,7 +48,7 @@
//! One consequence of this difference is that safe/sound Rust allows for more operations on atomic locations
//! than the C++20 atomic API was intended to allow, such as non-atomically accessing
//! a previously atomically accessed location, or accessing previously atomically accessed locations with a differently sized operation
//! (such as accessing the top 16 bits of an AtomicU32). These senarios are generally undiscussed in formalisations of C++ memory model.
//! (such as accessing the top 16 bits of an AtomicU32). These scenarios are generally undiscussed in formalisations of C++ memory model.
//! In Rust, these operations can only be done through a `&mut AtomicFoo` reference or one derived from it, therefore these operations
//! can only happen after all previous accesses on the same locations. This implementation is adapted to allow these operations.
//! A mixed atomicity read that races with writes, or a write that races with reads or writes will still cause UBs to be thrown.
@ -61,7 +61,7 @@
//
// 2. In the operational semantics, each store element keeps the timestamp of a thread when it loads from the store.
// If the same thread loads from the same store element multiple times, then the timestamps at all loads are saved in a list of load elements.
// This is not necessary as later loads by the same thread will always have greater timetstamp values, so we only need to record the timestamp of the first
// This is not necessary as later loads by the same thread will always have greater timestamp values, so we only need to record the timestamp of the first
// load by each thread. This optimisation is done in tsan11
// (https://github.com/ChrisLidbury/tsan11/blob/ecbd6b81e9b9454e01cba78eb9d88684168132c7/lib/tsan/rtl/tsan_relaxed.h#L35-L37)
// and here.
@ -193,7 +193,7 @@ impl StoreBufferAlloc {
buffers.remove_pos_range(pos_range);
}
AccessType::Empty(_) => {
// The range had no weak behaivours attached, do nothing
// The range had no weak behaviours attached, do nothing
}
}
}
@ -336,7 +336,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
let mut found_sc = false;
// FIXME: we want an inclusive take_while (stops after a false predicate, but
// includes the element that gave the false), but such function doesn't yet
// exist in the standard libary https://github.com/rust-lang/rust/issues/62208
// exist in the standard library https://github.com/rust-lang/rust/issues/62208
// so we have to hack around it with keep_searching
let mut keep_searching = true;
let candidates = self

View file

@ -372,7 +372,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
// Inlining of `DEFAULT` from
// https://github.com/rust-lang/rust/blob/master/compiler/rustc_session/src/config/sigpipe.rs.
// Alaways using DEFAULT is okay since we don't support signals in Miri anyway.
// Always using DEFAULT is okay since we don't support signals in Miri anyway.
let sigpipe = 2;
ecx.call_function(
@ -456,7 +456,7 @@ pub fn eval_entry<'tcx>(
return None;
}
// Check for memory leaks.
info!("Additonal static roots: {:?}", ecx.machine.static_roots);
info!("Additional static roots: {:?}", ecx.machine.static_roots);
let leaks = ecx.leak_report(&ecx.machine.static_roots);
if leaks != 0 {
tcx.sess.err("the evaluated program leaked memory");

View file

@ -524,7 +524,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}
// Make sure we visit aggregrates in increasing offset order.
// Make sure we visit aggregates in increasing offset order.
fn visit_aggregate(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,

View file

@ -77,7 +77,7 @@ impl<'mir, 'tcx> GlobalStateInner {
Ok(pos) => Some(global_state.int_to_ptr_map[pos].1),
Err(0) => None,
Err(pos) => {
// This is the largest of the adresses smaller than `int`,
// This is the largest of the addresses smaller than `int`,
// i.e. the greatest lower bound (glb)
let (glb, alloc_id) = global_state.int_to_ptr_map[pos - 1];
// This never overflows because `addr >= glb`

View file

@ -491,9 +491,9 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
measureme::Profiler::new(out).expect("Couldn't create `measureme` profiler")
});
let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
let borrow_tracker = config.borrow_tracker.map(|bt| bt.instanciate_global_state(config));
let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
let data_race = config.data_race_detector.then(|| data_race::GlobalState::new(config));
// Determinine page size, stack address, and stack size.
// Determine page size, stack address, and stack size.
// These values are mostly meaningless, but the stack address is also where we start
// allocating physical integer addresses for all allocations.
let page_size = if let Some(page_size) = config.page_size {

View file

@ -585,9 +585,9 @@ fn simd_element_to_bool(elem: ImmTy<'_, Provenance>) -> InterpResult<'_, bool> {
})
}
fn simd_bitmask_index(idx: u32, vec_len: u32, endianess: Endian) -> u32 {
fn simd_bitmask_index(idx: u32, vec_len: u32, endianness: Endian) -> u32 {
assert!(idx < vec_len);
match endianess {
match endianness {
Endian::Little => idx,
#[allow(clippy::integer_arithmetic)] // idx < vec_len
Endian::Big => vec_len - 1 - idx, // reverse order of bits

View file

@ -329,7 +329,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
match direction {
PathConversion::HostToTarget => {
// If this start withs a `\`, we add `\\?` so it starts with `\\?\` which is
// some magic path on Windos that *is* considered absolute.
// some magic path on Windows that *is* considered absolute.
if converted.get(0).copied() == Some(b'\\') {
converted.splice(0..0, b"\\\\?".iter().copied());
}

View file

@ -40,7 +40,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.eval_libc_i32("CLOCK_REALTIME_COARSE"),
];
// The second kind is MONOTONIC clocks for which 0 is an arbitrary time point, but they are
// never allowed to go backwards. We don't need to do any additonal monotonicity
// never allowed to go backwards. We don't need to do any additional monotonicity
// enforcement because std::time::Instant already guarantees that it is monotonic.
relative_clocks = vec![
this.eval_libc_i32("CLOCK_MONOTONIC"),

View file

@ -79,7 +79,7 @@ impl<'tcx> TlsData<'tcx> {
trace!("TLS key {} removed", key);
Ok(())
}
None => throw_ub_format!("removing a non-existig TLS key: {}", key),
None => throw_ub_format!("removing a nonexistent TLS key: {}", key),
}
}
@ -175,7 +175,7 @@ impl<'tcx> TlsData<'tcx> {
Some(key) => Excluded(key),
None => Unbounded,
};
// We interpret the documentaion above (taken from POSIX) as saying that we need to iterate
// We interpret the documentation above (taken from POSIX) as saying that we need to iterate
// over all keys and run each destructor at least once before running any destructor a 2nd
// time. That's why we have `key` to indicate how far we got in the current iteration. If we
// return `None`, `schedule_next_pthread_tls_dtor` will re-try with `key` set to `None` to

View file

@ -1015,8 +1015,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let path = this.read_path_from_c_str(pathname_ptr)?.into_owned();
// See <https://github.com/rust-lang/rust/pull/79196> for a discussion of argument sizes.
let at_ampty_path = this.eval_libc_i32("AT_EMPTY_PATH");
let empty_path_flag = flags & at_ampty_path == at_ampty_path;
let at_empty_path = this.eval_libc_i32("AT_EMPTY_PATH");
let empty_path_flag = flags & at_empty_path == at_empty_path;
// We only support:
// * interpreting `path` as an absolute directory,
// * interpreting `path` as a path relative to `dirfd` when the latter is `AT_FDCWD`, or
@ -1053,7 +1053,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
return Ok(-1);
}
// the `_mask_op` paramter specifies the file information that the caller requested.
// the `_mask_op` parameter specifies the file information that the caller requested.
// However `statx` is allowed to return information that was not requested or to not
// return information that was requested. This `mask` represents the information we can
// actually provide for any target.

View file

@ -169,7 +169,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.linux_statx(&args[1], &args[2], &args[3], &args[4], &args[5])?;
this.write_scalar(Scalar::from_target_isize(result.into(), this), dest)?;
}
// `futex` is used by some synchonization primitives.
// `futex` is used by some synchronization primitives.
id if id == sys_futex => {
futex(this, &args[1..], dest)?;
}
@ -180,7 +180,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}
// Miscelanneous
// Miscellaneous
"getrandom" => {
let [ptr, len, flags] =
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;

View file

@ -242,7 +242,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
//
// To distinguish these two cases in already constructed mutexes, we
// use the same trick as glibc: for the case when
// `pthread_mutexattr_settype` is caled explicitly, we set the
// `pthread_mutexattr_settype` is called explicitly, we set the
// `PTHREAD_MUTEX_NORMAL_FLAG` flag.
let normal_kind = kind | PTHREAD_MUTEX_NORMAL_FLAG;
// Check that after setting the flag, the kind is distinguishable

View file

@ -96,7 +96,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
if byte_offset != 0 {
throw_unsup_format!(
"`NtWriteFile` `ByteOffset` paremeter is non-null, which is unsupported"
"`NtWriteFile` `ByteOffset` parameter is non-null, which is unsupported"
);
}

View file

@ -1,5 +1,5 @@
#![feature(core_intrinsics)]
fn main() {
// divison by 0
// division by 0
unsafe { std::intrinsics::exact_div(2, 0) }; //~ ERROR: divisor of zero
}

View file

@ -1,5 +1,5 @@
#![feature(core_intrinsics)]
fn main() {
// divison with a remainder
// division with a remainder
unsafe { std::intrinsics::exact_div(2u16, 3) }; //~ ERROR: 2_u16 cannot be divided by 3_u16 without remainder
}

View file

@ -1,5 +1,5 @@
#![feature(core_intrinsics)]
fn main() {
// signed divison with a remainder
// signed division with a remainder
unsafe { std::intrinsics::exact_div(-19i8, 2) }; //~ ERROR: -19_i8 cannot be divided by 2_i8 without remainder
}

View file

@ -1,5 +1,5 @@
#![feature(core_intrinsics)]
fn main() {
// divison of MIN by -1
// division of MIN by -1
unsafe { std::intrinsics::exact_div(i64::MIN, -1) }; //~ ERROR: overflow in signed remainder (dividing MIN by -1)
}

View file

@ -1,5 +1,5 @@
// A callee may not read the destination of our `&mut` without us noticing.
// Thise code got carefully checked to not introduce any reborrows
// This code got carefully checked to not introduce any reborrows
// that are not explicit in the source. Let's hope the compiler does not break this later!
use std::mem;

View file

@ -1,7 +1,7 @@
//@compile-flags: -Zmiri-symbolic-alignment-check -Zmiri-permissive-provenance -Cdebug-assertions=no
// With the symbolic alignment check, even with intptrcast and without
// validation, we want to be *sure* to catch bugs that arise from pointers being
// insufficiently aligned. The only way to achieve that is not not let programs
// insufficiently aligned. The only way to achieve that is not to let programs
// exploit integer information for alignment, so here we test that this is
// indeed the case.
//

View file

@ -14,7 +14,7 @@ static mut RECORD: usize = 0;
static mut KEYS: [Key; 2] = [0; 2];
static mut GLOBALS: [u64; 2] = [1, 0];
static mut CANNARY: *mut u64 = ptr::null_mut(); // this serves as a cannary: if TLS dtors are not run properly, this will not get deallocated, making the test fail.
static mut CANARY: *mut u64 = ptr::null_mut(); // this serves as a canary: if TLS dtors are not run properly, this will not get deallocated, making the test fail.
pub unsafe fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
let mut key = 0;
@ -33,7 +33,7 @@ pub fn record(r: usize) {
}
unsafe extern "C" fn dtor(ptr: *mut u64) {
assert!(CANNARY != ptr::null_mut()); // make sure we do not get run too often
assert!(CANARY != ptr::null_mut()); // make sure we do not get run too often
let val = *ptr;
let which_key =
@ -45,15 +45,15 @@ unsafe extern "C" fn dtor(ptr: *mut u64) {
set(KEYS[which_key], ptr as *mut _);
}
// Check if the records matches what we expect. If yes, clear the cannary.
// If the record is wrong, the cannary will never get cleared, leading to a leak -> test fails.
// Check if the records matches what we expect. If yes, clear the canary.
// If the record is wrong, the canary will never get cleared, leading to a leak -> test fails.
// If the record is incomplete (i.e., more dtor calls happen), the check at the beginning of this function will fail -> test fails.
// The correct sequence is: First key 0, then key 1, then key 0.
// Note that this relies on dtor order, which is not specified by POSIX, but seems to be
// consistent between Miri and Linux currently (as of Aug 2022).
if RECORD == 0_1_0 {
drop(Box::from_raw(CANNARY));
CANNARY = ptr::null_mut();
drop(Box::from_raw(CANARY));
CANARY = ptr::null_mut();
}
}
@ -67,7 +67,7 @@ fn main() {
set(*key, global as *mut _ as *mut u8);
}
// Initialize cannary
CANNARY = Box::into_raw(Box::new(0u64));
// Initialize canary
CANARY = Box::into_raw(Box::new(0u64));
}
}

View file

@ -130,7 +130,7 @@ fn test_readlink() {
let mut large_buf = vec![0xFF; expected_path.len() + 1];
let res =
unsafe { libc::readlink(symlink_c_ptr, large_buf.as_mut_ptr().cast(), large_buf.len()) };
// Check that the resovled path was properly written into the buf.
// Check that the resolved path was properly written into the buf.
assert_eq!(&large_buf[..(large_buf.len() - 1)], expected_path);
assert_eq!(large_buf.last(), Some(&0xFF));
assert_eq!(res, large_buf.len() as isize - 1);

View file

@ -90,7 +90,7 @@ fn test_posix_realpath_errors() {
use std::ffi::CString;
use std::io::ErrorKind;
// Test non-existent path returns an error.
// Test nonexistent path returns an error.
let c_path = CString::new("./nothing_to_see_here").expect("CString::new failed");
let r = unsafe { libc::realpath(c_path.as_ptr(), std::ptr::null_mut()) };
assert!(r.is_null());

View file

@ -10,7 +10,7 @@
// the RNG and never observed in our tests.
//
// To mitigate this, each test is ran enough times such that the chance
// of spurious success is very low. These tests never supriously fail.
// of spurious success is very low. These tests never spuriously fail.
// Test cases and their consistent outcomes are from
// http://svr-pes20-cppmem.cl.cam.ac.uk/cppmem/

View file

@ -93,7 +93,7 @@ fn pointers_and_wrappers() {
trait Trait {
// This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
// without unsized_locals), but wrappers arond `Self` currently are not.
// without unsized_locals), but wrappers around `Self` currently are not.
// FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
// fn wrapper(self: Wrapper<Self>) -> i32;
fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;

View file

@ -19,7 +19,7 @@ unsafe impl GlobalAlloc for Allocator {
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if layout.size() == 123 {
println!("Dellocated!")
println!("Deallocated!")
}
System.dealloc(ptr, layout)

View file

@ -1,2 +1,2 @@
Allocated!
Dellocated!
Deallocated!

View file

@ -7,7 +7,7 @@ macro_rules! zip {
};
// Intermediate steps to build the zipped expression, the match pattern, and
// and the output tuple of the closure, using macro hygene to repeatedly
// and the output tuple of the closure, using macro hygiene to repeatedly
// introduce new variables named 'x'.
([$a:expr, $($rest:expr),*], $zip:expr, $pat:pat, [$($flat:expr),*]) => {
zip!([$($rest),*], $zip.zip($a), ($pat,x), [$($flat),*, x])

View file

@ -36,7 +36,7 @@ fn test_basic() {
let b = x.b;
assert_eq!(a, 42);
assert_eq!(b, 99);
assert_eq!(&x.fill, &0); // `fill` just requirs 1-byte-align, so this is fine
assert_eq!(&x.fill, &0); // `fill` just requires 1-byte-align, so this is fine
// can't do `assert_eq!(x.a, 42)`, because `assert_eq!` takes a reference
assert_eq!({ x.a }, 42);
assert_eq!({ x.b }, 99);

View file

@ -63,7 +63,7 @@ fn ptr_arith_offset_overflow() {
let v = [1i16, 2];
let x = &mut ptr::null(); // going through memory as there are more sanity checks along that path
*x = v.as_ptr().wrapping_offset(1); // ptr to the 2nd element
// Adding 2*isize::max and then 1 is like substracting 1
// Adding 2*isize::max and then 1 is like subtracting 1
*x = x.wrapping_offset(isize::MAX);
*x = x.wrapping_offset(isize::MAX);
*x = x.wrapping_offset(1);

View file

@ -58,7 +58,7 @@ fn main() {
STATIC_SIMPLE_FN(x);
CONST_SIMPLE_FN(x);
STATIC_BAZ(BYTES); // neees static lifetime
STATIC_BAZ(BYTES); // needs static lifetime
CONST_BAZ(BYTES);
// make sure this works with different lifetimes

View file

@ -365,7 +365,7 @@ fn test_directory() {
// Deleting the directory should succeed.
remove_dir(&dir_path).unwrap();
// Reading the metadata of a non-existent directory should fail with a "not found" error.
// Reading the metadata of a nonexistent directory should fail with a "not found" error.
assert_eq!(ErrorKind::NotFound, check_metadata(&[], &dir_path).unwrap_err().kind());
// To test remove_dir_all, re-create the directory with a file and a directory in it.

View file

@ -90,7 +90,7 @@ fn mut_raw_mut() {
assert_eq!(unsafe { *xraw }, 4);
assert_eq!(*xref1, 4);
assert_eq!(unsafe { *xraw }, 4);
// we cannot use xref2; see `compile-fail/stacked-borows/illegal_read4.rs`
// we cannot use xref2; see `compile-fail/stacked-borrows/illegal_read4.rs`
}
assert_eq!(x, 4);
}
@ -104,7 +104,7 @@ fn partially_invalidate_mut() {
assert_eq!(*data, (1, 1));
}
// Make sure that we can handle the situation where a loaction is frozen when being dropped.
// Make sure that we can handle the situation where a location is frozen when being dropped.
fn drop_after_sharing() {
let x = String::from("hello!");
let _len = x.len();
@ -224,7 +224,7 @@ fn wide_raw_ptr_in_tuple() {
fn not_unpin_not_protected() {
// `&mut !Unpin`, at least for now, does not get `noalias` nor `dereferenceable`, so we also
// don't add protectors. (We could, but until we have a better idea for where we want to go with
// the self-referntial-generator situation, it does not seem worth the potential trouble.)
// the self-referential-generator situation, it does not seem worth the potential trouble.)
use std::marker::PhantomPinned;
pub struct NotUnpin(i32, PhantomPinned);

View file

@ -29,7 +29,7 @@ fn unique_aliasing() {
// This is a regression test for the aliasing rules of a `Unique<T>` pointer.
// At the time of writing this test case, Miri does not treat `Unique<T>`
// pointers as a special case, these are treated like any other raw pointer.
// However, there are existing Github issues which may lead to `Unique<T>`
// However, there are existing GitHub issues which may lead to `Unique<T>`
// becoming a special case through asserting unique ownership over the pointee:
// - https://github.com/rust-lang/unsafe-code-guidelines/issues/258
// - https://github.com/rust-lang/unsafe-code-guidelines/issues/262

View file

@ -1,6 +1,6 @@
//@compile-flags: -Zmiri-ignore-leaks
// Tests operations not perfomable through C++'s atomic API
// Tests operations not performable through C++'s atomic API
// but doable in safe (at least sound) Rust.
#![feature(atomic_from_mut)]

View file

@ -1,6 +1,6 @@
//@compile-flags: -Zmiri-ignore-leaks
// Tests operations not perfomable through C++'s atomic API
// Tests operations not performable through C++'s atomic API
// but doable in unsafe Rust which we think *should* be fine.
// Nonetheless they may be determined as inconsistent with the
// memory model in the future.