From 1f0cbb29458fdd6d3a3656516d485a23a0aa18f3 Mon Sep 17 00:00:00 2001 From: Gary Guo Date: Tue, 31 Aug 2021 00:22:43 +0100 Subject: [PATCH] Use atomic_load_unordered for first word load in misaligned case --- library/compiler-builtins/src/mem/impls.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/compiler-builtins/src/mem/impls.rs b/library/compiler-builtins/src/mem/impls.rs index 7022d6257116..65887a338929 100644 --- a/library/compiler-builtins/src/mem/impls.rs +++ b/library/compiler-builtins/src/mem/impls.rs @@ -61,8 +61,8 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize) // Realign src let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize; - // XXX: Could this possibly be UB? - let mut prev_word = *src_aligned; + // This will read (but won't use) bytes out of bounds. + let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned); while dest_usize < dest_end { src_aligned = src_aligned.add(1); @@ -154,8 +154,8 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) { // Realign src_aligned let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize; - // XXX: Could this possibly be UB? - let mut prev_word = *src_aligned; + // This will read (but won't use) bytes out of bounds. + let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned); while dest_start < dest_usize { src_aligned = src_aligned.sub(1);