This commit is contained in:
Ralf Jung 2025-06-28 09:12:51 +02:00
parent cff5a7cb29
commit 35e6def487
7 changed files with 8 additions and 10 deletions

View file

@@ -189,7 +189,7 @@ impl IsolatedAlloc {
};
assert_ne!(page_ptr.addr(), usize::MAX, "mmap failed");
// `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
assert!(self.page_size % COMPRESSION_FACTOR == 0);
assert!(self.page_size.is_multiple_of(COMPRESSION_FACTOR));
self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
self.page_ptrs.push(NonNull::new(page_ptr).unwrap());
(NonNull::new(page_ptr).unwrap(), self.page_infos.last_mut().unwrap())

View file

@@ -129,7 +129,7 @@ impl ReusePool {
let idx = rng.random_range(begin..end);
// Remove it from the pool and return.
let (chosen_addr, chosen_size, chosen_thread, clock) = subpool.remove(idx);
debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0);
debug_assert!(chosen_size >= size && chosen_addr.is_multiple_of(align.bytes()));
debug_assert!(cross_thread_reuse || chosen_thread == thread);
// No synchronization needed if we reused from the current thread.
Some((chosen_addr, if chosen_thread == thread { None } else { Some(clock) }))

View file

@@ -327,7 +327,7 @@ mod tests {
for i in 0..1000 {
i.hash(&mut hasher);
let rng = hasher.finish();
let op = rng % 3 == 0;
let op = rng.is_multiple_of(3);
let key = (rng / 2) % 50;
let val = (rng / 100) % 1000;
if op {

View file

@@ -1056,7 +1056,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
// What's the offset between us and the promised alignment?
let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
// That must also be aligned.
if distance % align.bytes() == 0 {
if distance.is_multiple_of(align.bytes()) {
// All looking good!
None
} else {
@@ -1612,7 +1612,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
ecx.machine.since_gc += 1;
// Possibly report our progress. This will point at the terminator we are about to execute.
if let Some(report_progress) = ecx.machine.report_progress {
if ecx.machine.basic_block_count % u64::from(report_progress) == 0 {
if ecx.machine.basic_block_count.is_multiple_of(u64::from(report_progress)) {
ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
block_count: ecx.machine.basic_block_count,
});

View file

@@ -22,8 +22,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let flags = this.read_scalar(flags)?.to_i32()?;
// old_address must be a multiple of the page size
#[expect(clippy::arithmetic_side_effects)] // PAGE_SIZE is nonzero
if old_address.addr().bytes() % this.machine.page_size != 0 || new_size == 0 {
if !old_address.addr().bytes().is_multiple_of(this.machine.page_size) || new_size == 0 {
this.set_last_error(LibcError("EINVAL"))?;
return interp_ok(this.eval_libc("MAP_FAILED"));
}

View file

@@ -130,8 +130,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// addr must be a multiple of the page size, but apart from that munmap is just implemented
// as a dealloc.
#[expect(clippy::arithmetic_side_effects)] // PAGE_SIZE is nonzero
if addr.addr().bytes() % this.machine.page_size != 0 {
if !addr.addr().bytes().is_multiple_of(this.machine.page_size) {
return this.set_last_error_and_return_i32(LibcError("EINVAL"));
}

View file

@@ -17,7 +17,7 @@ fn bytewise_equal_atomic_relaxed<'tcx>(
// We do this in chunks of 4, so that we are okay to race with (sufficiently aligned)
// 4-byte atomic accesses.
assert!(size.bytes() % 4 == 0);
assert!(size.bytes().is_multiple_of(4));
for i in 0..(size.bytes() / 4) {
let offset = Size::from_bytes(i.strict_mul(4));
let load = |place: &MPlaceTy<'tcx>| {