diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index 44d5333ce1f4..b8638c5b09be 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -745,7 +745,20 @@ macro_rules! uint_impl { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn trailing_zeros(self) -> u32 { - unsafe { $cttz(self as $ActualT) as u32 } + // As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic + // emits two conditional moves on x86_64. By promoting the value to + // u16 and setting bit 8, we get better code without any conditional + // operations. + // FIXME: There's an LLVM patch (http://reviews.llvm.org/D9284) + // pending, remove this workaround once LLVM generates better code + // for cttz8. + unsafe { + if $BITS == 8 { + intrinsics::cttz16(self as u16 | 0x100) as u32 + } else { + $cttz(self as $ActualT) as u32 + } + } } /// Shifts the bits to the left by a specified amount, `n`, diff --git a/src/test/run-pass/intrinsics-integer.rs b/src/test/run-pass/intrinsics-integer.rs index f1d731c8b1d7..8dbe927f06bf 100644 --- a/src/test/run-pass/intrinsics-integer.rs +++ b/src/test/run-pass/intrinsics-integer.rs @@ -109,11 +109,6 @@ pub fn main() { assert_eq!(cttz32(100), 2); assert_eq!(cttz64(100), 2); - assert_eq!(cttz8(-1), 0); - assert_eq!(cttz16(-1), 0); - assert_eq!(cttz32(-1), 0); - assert_eq!(cttz64(-1), 0); - assert_eq!(bswap16(0x0A0B), 0x0B0A); assert_eq!(bswap32(0x0ABBCC0D), 0x0DCCBB0A); assert_eq!(bswap64(0x0122334455667708), 0x0877665544332201