diff --git a/library/compiler-builtins/src/mem/x86_64.rs b/library/compiler-builtins/src/mem/x86_64.rs
index daa92098e6bc..e9003310c202 100644
--- a/library/compiler-builtins/src/mem/x86_64.rs
+++ b/library/compiler-builtins/src/mem/x86_64.rs
@@ -173,6 +173,7 @@ pub unsafe fn compare_bytes(a: *const u8, b: *const u8, n: usize) -> i32 {
     c16(a.cast(), b.cast(), n)
 }
 
+#[cfg(target_feature = "sse2")]
 #[inline(always)]
 pub unsafe fn c_string_length(s: *const core::ffi::c_char) -> usize {
     let mut n: usize;
@@ -256,6 +257,19 @@ pub unsafe fn c_string_length(s: *const core::ffi::c_char) -> usize {
     n
 }
 
+// Provided for scenarios like kernel development, where SSE might not
+// be available.
+#[cfg(not(target_feature = "sse2"))]
+#[inline(always)]
+pub unsafe fn c_string_length(mut s: *const core::ffi::c_char) -> usize {
+    let mut n = 0;
+    while *s != 0 {
+        n += 1;
+        s = s.add(1);
+    }
+    n
+}
+
 /// Determine optimal parameters for a `rep` instruction.
 fn rep_param(dest: *mut u8, mut count: usize) -> (usize, usize, usize) {
     // Unaligned writes are still slow on modern processors, so align the destination address.