diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs index dfc0b565195e..c3ff928a3277 100644 --- a/library/core/src/slice/cmp.rs +++ b/library/core/src/slice/cmp.rs @@ -13,11 +13,7 @@ impl const PartialEq<[U]> for [T] where T: [const] PartialEq, { - // It's not worth trying to inline the loops underneath here *in MIR*, - // and preventing it encourages more useful inlining upstream, - // such as in `::eq`. - // The codegen backend can still inline it later if needed. - #[rustc_no_mir_inline] + #[inline] fn eq(&self, other: &[U]) -> bool { SlicePartialEq::equal(self, other) } @@ -108,6 +104,11 @@ impl const SlicePartialEq for [A] where A: [const] PartialEq, { + // It's not worth trying to inline the loops underneath here *in MIR*, + // and preventing it encourages more useful inlining upstream, + // such as in `::eq`. + // The codegen backend can still inline it later if needed. + #[rustc_no_mir_inline] default fn equal(&self, other: &[B]) -> bool { if self.len() != other.len() { return false; } @@ -137,6 +138,16 @@ impl const SlicePartialEq for [A] where A: [const] BytewiseEq, { + // This is usually a pretty good backend inlining candidate because the + // intrinsic tends to just be `memcmp`. However, as of 2025-12 letting + // MIR inline this makes reuse worse because it means that, for example, + // `String::eq` doesn't inline, whereas by keeping this from inlining, all + // the wrappers up until the call to this disappear. If the heuristics have + // changed and this is no longer fruitful, though, please do remove it. + // In the meantime, it's fine to not inline it in MIR because the backend + // will still inline it if it thinks it's important to do so. 
+ #[rustc_no_mir_inline] + #[inline] fn equal(&self, other: &[B]) -> bool { if self.len() != other.len() { return false; diff --git a/tests/mir-opt/jump_threading.chained_conditions.JumpThreading.panic-abort.diff b/tests/mir-opt/jump_threading.chained_conditions.JumpThreading.panic-abort.diff index 0875d437296d..3cf28f4b60af 100644 --- a/tests/mir-opt/jump_threading.chained_conditions.JumpThreading.panic-abort.diff +++ b/tests/mir-opt/jump_threading.chained_conditions.JumpThreading.panic-abort.diff @@ -92,62 +92,66 @@ scope 24 (inlined core::str::::as_bytes) { } scope 25 (inlined std::cmp::impls::::eq) { + scope 26 (inlined core::slice::cmp::::eq) { + } } } } } - scope 26 (inlined std::cmp::impls:: for &String>::eq) { + scope 27 (inlined std::cmp::impls:: for &String>::eq) { let mut _39: &std::string::String; let mut _40: &str; - scope 27 (inlined >::eq) { - scope 28 (inlined #[track_caller] >::index) { + scope 28 (inlined >::eq) { + scope 29 (inlined #[track_caller] >::index) { let _41: &str; - scope 29 (inlined String::as_str) { + scope 30 (inlined String::as_str) { let _42: &[u8]; - scope 30 (inlined Vec::::as_slice) { + scope 31 (inlined Vec::::as_slice) { let _43: *const [u8]; let mut _44: *const u8; let mut _45: usize; - scope 31 (inlined Vec::::as_ptr) { - scope 32 (inlined alloc::raw_vec::RawVec::::ptr) { - scope 33 (inlined alloc::raw_vec::RawVecInner::ptr::) { - scope 34 (inlined alloc::raw_vec::RawVecInner::non_null::) { + scope 32 (inlined Vec::::as_ptr) { + scope 33 (inlined alloc::raw_vec::RawVec::::ptr) { + scope 34 (inlined alloc::raw_vec::RawVecInner::ptr::) { + scope 35 (inlined alloc::raw_vec::RawVecInner::non_null::) { let mut _46: std::ptr::NonNull; - scope 35 (inlined Unique::::cast::) { - scope 36 (inlined NonNull::::cast::) { - scope 37 (inlined NonNull::::as_ptr) { + scope 36 (inlined Unique::::cast::) { + scope 37 (inlined NonNull::::cast::) { + scope 38 (inlined NonNull::::as_ptr) { } } } - scope 38 (inlined Unique::::as_non_null_ptr) { + 
scope 39 (inlined Unique::::as_non_null_ptr) { } } - scope 39 (inlined NonNull::::as_ptr) { + scope 40 (inlined NonNull::::as_ptr) { } } } } } - scope 40 (inlined from_utf8_unchecked) { + scope 41 (inlined from_utf8_unchecked) { } } - scope 41 (inlined #[track_caller] core::str::traits:: for RangeFull>::index) { + scope 42 (inlined #[track_caller] core::str::traits:: for RangeFull>::index) { } } - scope 42 (inlined #[track_caller] core::str::traits:: for str>::index) { - scope 43 (inlined #[track_caller] core::str::traits:: for RangeFull>::index) { + scope 43 (inlined #[track_caller] core::str::traits:: for str>::index) { + scope 44 (inlined #[track_caller] core::str::traits:: for RangeFull>::index) { } } - scope 44 (inlined core::str::traits::::eq) { + scope 45 (inlined core::str::traits::::eq) { let mut _47: &&[u8]; let _48: &[u8]; let mut _49: &&[u8]; let _50: &[u8]; - scope 45 (inlined core::str::::as_bytes) { - } scope 46 (inlined core::str::::as_bytes) { } - scope 47 (inlined std::cmp::impls::::eq) { + scope 47 (inlined core::str::::as_bytes) { + } + scope 48 (inlined std::cmp::impls::::eq) { + scope 49 (inlined core::slice::cmp::::eq) { + } } } } @@ -214,7 +218,7 @@ StorageLive(_38); _36 = copy _29 as &[u8] (Transmute); _38 = copy _28 as &[u8] (Transmute); - _7 = <[u8] as PartialEq>::eq(move _36, move _38) -> [return: bb19, unwind unreachable]; + _7 = <[u8] as core::slice::cmp::SlicePartialEq>::equal(move _36, move _38) -> [return: bb19, unwind unreachable]; } bb5: { @@ -270,7 +274,7 @@ StorageLive(_50); _48 = copy _41 as &[u8] (Transmute); _50 = copy _40 as &[u8] (Transmute); - _14 = <[u8] as PartialEq>::eq(move _48, move _50) -> [return: bb20, unwind unreachable]; + _14 = <[u8] as core::slice::cmp::SlicePartialEq>::equal(move _48, move _50) -> [return: bb20, unwind unreachable]; } bb7: { diff --git a/tests/mir-opt/jump_threading.chained_conditions.JumpThreading.panic-unwind.diff 
b/tests/mir-opt/jump_threading.chained_conditions.JumpThreading.panic-unwind.diff index b942eeed37e0..2f0d83f92792 100644 --- a/tests/mir-opt/jump_threading.chained_conditions.JumpThreading.panic-unwind.diff +++ b/tests/mir-opt/jump_threading.chained_conditions.JumpThreading.panic-unwind.diff @@ -92,62 +92,66 @@ scope 24 (inlined core::str::::as_bytes) { } scope 25 (inlined std::cmp::impls::::eq) { + scope 26 (inlined core::slice::cmp::::eq) { + } } } } } - scope 26 (inlined std::cmp::impls:: for &String>::eq) { + scope 27 (inlined std::cmp::impls:: for &String>::eq) { let mut _39: &std::string::String; let mut _40: &str; - scope 27 (inlined >::eq) { - scope 28 (inlined #[track_caller] >::index) { + scope 28 (inlined >::eq) { + scope 29 (inlined #[track_caller] >::index) { let _41: &str; - scope 29 (inlined String::as_str) { + scope 30 (inlined String::as_str) { let _42: &[u8]; - scope 30 (inlined Vec::::as_slice) { + scope 31 (inlined Vec::::as_slice) { let _43: *const [u8]; let mut _44: *const u8; let mut _45: usize; - scope 31 (inlined Vec::::as_ptr) { - scope 32 (inlined alloc::raw_vec::RawVec::::ptr) { - scope 33 (inlined alloc::raw_vec::RawVecInner::ptr::) { - scope 34 (inlined alloc::raw_vec::RawVecInner::non_null::) { + scope 32 (inlined Vec::::as_ptr) { + scope 33 (inlined alloc::raw_vec::RawVec::::ptr) { + scope 34 (inlined alloc::raw_vec::RawVecInner::ptr::) { + scope 35 (inlined alloc::raw_vec::RawVecInner::non_null::) { let mut _46: std::ptr::NonNull; - scope 35 (inlined Unique::::cast::) { - scope 36 (inlined NonNull::::cast::) { - scope 37 (inlined NonNull::::as_ptr) { + scope 36 (inlined Unique::::cast::) { + scope 37 (inlined NonNull::::cast::) { + scope 38 (inlined NonNull::::as_ptr) { } } } - scope 38 (inlined Unique::::as_non_null_ptr) { + scope 39 (inlined Unique::::as_non_null_ptr) { } } - scope 39 (inlined NonNull::::as_ptr) { + scope 40 (inlined NonNull::::as_ptr) { } } } } } - scope 40 (inlined from_utf8_unchecked) { + scope 41 (inlined 
from_utf8_unchecked) { } } - scope 41 (inlined #[track_caller] core::str::traits:: for RangeFull>::index) { + scope 42 (inlined #[track_caller] core::str::traits:: for RangeFull>::index) { } } - scope 42 (inlined #[track_caller] core::str::traits:: for str>::index) { - scope 43 (inlined #[track_caller] core::str::traits:: for RangeFull>::index) { + scope 43 (inlined #[track_caller] core::str::traits:: for str>::index) { + scope 44 (inlined #[track_caller] core::str::traits:: for RangeFull>::index) { } } - scope 44 (inlined core::str::traits::::eq) { + scope 45 (inlined core::str::traits::::eq) { let mut _47: &&[u8]; let _48: &[u8]; let mut _49: &&[u8]; let _50: &[u8]; - scope 45 (inlined core::str::::as_bytes) { - } scope 46 (inlined core::str::::as_bytes) { } - scope 47 (inlined std::cmp::impls::::eq) { + scope 47 (inlined core::str::::as_bytes) { + } + scope 48 (inlined std::cmp::impls::::eq) { + scope 49 (inlined core::slice::cmp::::eq) { + } } } } @@ -214,7 +218,7 @@ StorageLive(_38); _36 = copy _29 as &[u8] (Transmute); _38 = copy _28 as &[u8] (Transmute); - _7 = <[u8] as PartialEq>::eq(move _36, move _38) -> [return: bb23, unwind: bb22]; + _7 = <[u8] as core::slice::cmp::SlicePartialEq>::equal(move _36, move _38) -> [return: bb23, unwind: bb22]; } bb5: { @@ -270,7 +274,7 @@ StorageLive(_50); _48 = copy _41 as &[u8] (Transmute); _50 = copy _40 as &[u8] (Transmute); - _14 = <[u8] as PartialEq>::eq(move _48, move _50) -> [return: bb24, unwind: bb22]; + _14 = <[u8] as core::slice::cmp::SlicePartialEq>::equal(move _48, move _50) -> [return: bb24, unwind: bb22]; } bb7: {