various minor native-lib-tracing tweaks, and disable native-lib-tracing mode by default

This commit is contained in:
Ralf Jung 2025-06-28 11:13:11 +02:00
parent 96a70e2e91
commit 62bb6216ea
14 changed files with 165 additions and 206 deletions

View file

@ -419,11 +419,9 @@ to Miri failing to detect cases of undefined behavior in a program.
Finally, the flag is **unsound** in the sense that Miri stops tracking details such as
initialization and provenance on memory shared with native code, so it is easily possible to write
code that has UB which is missed by Miri.
* `-Zmiri-force-old-native-lib-mode` disables the WIP improved native code access tracking. If for
whatever reason enabling native calls leads to odd behaviours or causes Miri to panic, disabling
the tracer *might* fix this. This will likely be removed once the tracer has been adequately
battle-tested. Note that this flag is only meaningful on Linux systems; other Unixes (currently)
exclusively use the old native-lib code.
* `-Zmiri-native-lib-enable-tracing` enables the WIP detailed tracing mode for invoking native code.
Note that this flag is only meaningful on Linux systems; other Unixes (currently) do not support
tracing mode.
* `-Zmiri-measureme=<name>` enables `measureme` profiling for the interpreted program.
This can be used to find which parts of your program are executing slowly under Miri.
The profile is written out to a file inside a directory called `<name>`, and can be processed

View file

@ -305,12 +305,12 @@ impl IsolatedAlloc {
/// Returns a vector of page addresses managed by the allocator.
pub fn pages(&self) -> Vec<usize> {
let mut pages: Vec<usize> =
self.page_ptrs.clone().into_iter().map(|p| p.expose_provenance().get()).collect();
self.huge_ptrs.iter().for_each(|(ptr, size)| {
self.page_ptrs.iter().map(|p| p.expose_provenance().get()).collect();
for (ptr, size) in self.huge_ptrs.iter() {
for i in 0..size / self.page_size {
pages.push(ptr.expose_provenance().get().strict_add(i * self.page_size));
}
});
}
pages
}

View file

@ -228,7 +228,7 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
let return_code = miri::eval_entry(tcx, entry_def_id, entry_type, &config, None)
.unwrap_or_else(|| {
#[cfg(target_os = "linux")]
miri::register_retcode_sv(rustc_driver::EXIT_FAILURE);
miri::native_lib::register_retcode_sv(rustc_driver::EXIT_FAILURE);
tcx.dcx().abort_if_errors();
rustc_driver::EXIT_FAILURE
});
@ -724,8 +724,8 @@ fn main() {
} else {
show_error!("-Zmiri-native-lib `{}` does not exist", filename);
}
} else if arg == "-Zmiri-force-old-native-lib-mode" {
miri_config.force_old_native_lib = true;
} else if arg == "-Zmiri-native-lib-enable-tracing" {
miri_config.native_lib_enable_tracing = true;
} else if let Some(param) = arg.strip_prefix("-Zmiri-num-cpus=") {
let num_cpus = param
.parse::<u32>()
@ -797,14 +797,14 @@ fn main() {
debug!("rustc arguments: {:?}", rustc_args);
debug!("crate arguments: {:?}", miri_config.args);
#[cfg(target_os = "linux")]
if !miri_config.native_lib.is_empty() && !miri_config.force_old_native_lib {
if !miri_config.native_lib.is_empty() && miri_config.native_lib_enable_tracing {
// FIXME: This should display a diagnostic / warning on error
// SAFETY: If any other threads exist at this point (namely for the ctrlc
// handler), they will not interact with anything on the main rustc/Miri
// thread in an async-signal-unsafe way such as by accessing shared
// semaphores, etc.; the handler only calls `sleep()` and `exit()`, which
// are async-signal-safe, as is accessing atomics
let _ = unsafe { miri::init_sv() };
let _ = unsafe { miri::native_lib::init_sv() };
}
run_compiler_and_exit(
&rustc_args,

View file

@ -132,8 +132,9 @@ pub enum NonHaltingDiagnostic {
Int2Ptr {
details: bool,
},
NativeCallSharedMem,
NativeCallNoTrace,
NativeCallSharedMem {
tracing: bool,
},
WeakMemoryOutdatedLoad {
ptr: Pointer,
},
@ -628,10 +629,8 @@ impl<'tcx> MiriMachine<'tcx> {
RejectedIsolatedOp(_) =>
("operation rejected by isolation".to_string(), DiagLevel::Warning),
Int2Ptr { .. } => ("integer-to-pointer cast".to_string(), DiagLevel::Warning),
NativeCallSharedMem =>
NativeCallSharedMem { .. } =>
("sharing memory with a native function".to_string(), DiagLevel::Warning),
NativeCallNoTrace =>
("unable to trace native code memory accesses".to_string(), DiagLevel::Warning),
ExternTypeReborrow =>
("reborrow of reference to `extern type`".to_string(), DiagLevel::Warning),
CreatedPointerTag(..)
@ -666,11 +665,8 @@ impl<'tcx> MiriMachine<'tcx> {
ProgressReport { .. } =>
format!("progress report: current operation being executed is here"),
Int2Ptr { .. } => format!("integer-to-pointer cast"),
NativeCallSharedMem => format!("sharing memory with a native function called via FFI"),
NativeCallNoTrace =>
format!(
"sharing memory with a native function called via FFI, and unable to use ptrace"
),
NativeCallSharedMem { .. } =>
format!("sharing memory with a native function called via FFI"),
WeakMemoryOutdatedLoad { ptr } =>
format!("weak memory emulation: outdated value returned from load at {ptr}"),
ExternTypeReborrow =>
@ -716,42 +712,41 @@ impl<'tcx> MiriMachine<'tcx> {
}
v
}
NativeCallSharedMem => {
vec![
note!(
"when memory is shared with a native function call, Miri can only track initialisation and provenance on a best-effort basis"
),
note!(
"in particular, Miri assumes that the native call initializes all memory it has written to"
),
note!(
"Miri also assumes that any part of this memory may be a pointer that is permitted to point to arbitrary exposed memory"
),
note!(
"what this means is that Miri will easily miss Undefined Behavior related to incorrect usage of this shared memory, so you should not take a clean Miri run as a signal that your FFI code is UB-free"
),
]
}
NativeCallNoTrace => {
vec![
note!(
"when memory is shared with a native function call, Miri stops tracking initialization and provenance for that memory"
),
note!(
"in particular, Miri assumes that the native call initializes all memory it has access to"
),
note!(
"Miri also assumes that any part of this memory may be a pointer that is permitted to point to arbitrary exposed memory"
),
note!(
"what this means is that Miri will easily miss Undefined Behavior related to incorrect usage of this shared memory, so you should not take a clean Miri run as a signal that your FFI code is UB-free"
),
#[cfg(target_os = "linux")]
note!(
"this is normally partially mitigated, but either -Zmiri-force-old-native-lib-mode was passed or ptrace is disabled on your system"
),
]
}
NativeCallSharedMem { tracing } =>
if *tracing {
vec![
note!(
"when memory is shared with a native function call, Miri can only track initialisation and provenance on a best-effort basis"
),
note!(
"in particular, Miri assumes that the native call initializes all memory it has written to"
),
note!(
"Miri also assumes that any part of this memory may be a pointer that is permitted to point to arbitrary exposed memory"
),
note!(
"what this means is that Miri will easily miss Undefined Behavior related to incorrect usage of this shared memory, so you should not take a clean Miri run as a signal that your FFI code is UB-free"
),
note!(
"tracing memory accesses in native code is not yet fully implemented, so there can be further imprecisions beyond what is documented here"
),
]
} else {
vec![
note!(
"when memory is shared with a native function call, Miri stops tracking initialization and provenance for that memory"
),
note!(
"in particular, Miri assumes that the native call initializes all memory it has access to"
),
note!(
"Miri also assumes that any part of this memory may be a pointer that is permitted to point to arbitrary exposed memory"
),
note!(
"what this means is that Miri will easily miss Undefined Behavior related to incorrect usage of this shared memory, so you should not take a clean Miri run as a signal that your FFI code is UB-free"
),
]
},
ExternTypeReborrow => {
assert!(self.borrow_tracker.as_ref().is_some_and(|b| {
matches!(

View file

@ -150,8 +150,8 @@ pub struct MiriConfig {
pub retag_fields: RetagFields,
/// The location of the shared object files to load when calling external functions
pub native_lib: Vec<PathBuf>,
/// Whether to force using the old native lib behaviour even if ptrace might be supported.
pub force_old_native_lib: bool,
/// Whether to enable the new native lib tracing system.
pub native_lib_enable_tracing: bool,
/// Run a garbage collector for BorTags every N basic blocks.
pub gc_interval: u32,
/// The number of CPUs to be reported by miri.
@ -201,7 +201,7 @@ impl Default for MiriConfig {
report_progress: None,
retag_fields: RetagFields::Yes,
native_lib: vec![],
force_old_native_lib: false,
native_lib_enable_tracing: false,
gc_interval: 10_000,
num_cpus: 1,
page_size: None,

View file

@ -100,7 +100,9 @@ use rustc_middle::{bug, span_bug};
use tracing::{info, trace};
#[cfg(target_os = "linux")]
pub use crate::shims::trace::{init_sv, register_retcode_sv};
pub mod native_lib {
pub use crate::shims::{init_sv, register_retcode_sv};
}
// Type aliases that set the provenance parameter.
pub type Pointer = interpret::Pointer<Option<machine::Provenance>>;

View file

@ -19,10 +19,10 @@ pub mod os_str;
pub mod panic;
pub mod time;
pub mod tls;
#[cfg(target_os = "linux")]
pub mod trace;
pub use self::files::FdTable;
#[cfg(target_os = "linux")]
pub use self::native_lib::trace::{init_sv, register_retcode_sv};
pub use self::unix::{DirTable, EpollInterestTable};
/// What needs to be done after emulating an item (a shim or an intrinsic) is done.

View file

@ -1,7 +1,9 @@
//! Implements calling functions from a native library.
use std::ops::Deref;
#[cfg(target_os = "linux")]
use std::{cell::RefCell, rc::Rc};
pub mod trace;
use std::ops::Deref;
use libffi::high::call as ffi;
use libffi::low::CodePtr;
@ -11,12 +13,11 @@ use rustc_middle::ty::{self as ty, IntTy, UintTy};
use rustc_span::Symbol;
#[cfg(target_os = "linux")]
use crate::alloc::isolated_alloc::IsolatedAlloc;
use self::trace::Supervisor;
use crate::*;
#[cfg(target_os = "linux")]
type CallResult<'tcx> =
InterpResult<'tcx, (ImmTy<'tcx>, Option<shims::trace::messages::MemEvents>)>;
type CallResult<'tcx> = InterpResult<'tcx, (ImmTy<'tcx>, Option<self::trace::messages::MemEvents>)>;
#[cfg(not(target_os = "linux"))]
type CallResult<'tcx> = InterpResult<'tcx, (ImmTy<'tcx>, Option<!>)>;
@ -32,84 +33,90 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> CallResult<'tcx> {
let this = self.eval_context_mut();
#[cfg(target_os = "linux")]
let alloc = this.machine.allocator.clone();
#[cfg(not(target_os = "linux"))]
let alloc = ();
let maybe_memevents;
let alloc = this.machine.allocator.as_ref().unwrap();
// SAFETY: We don't touch the machine memory past this point.
#[cfg(target_os = "linux")]
let (guard, stack_ptr) = unsafe { Supervisor::start_ffi(alloc) };
// Call the function (`ptr`) with arguments `libffi_args`, and obtain the return value
// as the specified primitive integer type
let scalar = match dest.layout.ty.kind() {
// ints
ty::Int(IntTy::I8) => {
// Unsafe because of the call to native code.
// Because this is calling a C function it is not necessarily sound,
// but there is no way around this and we've checked as much as we can.
let x = unsafe { do_native_call::<i8>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_i8(x.0)
}
ty::Int(IntTy::I16) => {
let x = unsafe { do_native_call::<i16>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_i16(x.0)
}
ty::Int(IntTy::I32) => {
let x = unsafe { do_native_call::<i32>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_i32(x.0)
}
ty::Int(IntTy::I64) => {
let x = unsafe { do_native_call::<i64>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_i64(x.0)
}
ty::Int(IntTy::Isize) => {
let x = unsafe { do_native_call::<isize>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_target_isize(x.0.try_into().unwrap(), this)
}
// uints
ty::Uint(UintTy::U8) => {
let x = unsafe { do_native_call::<u8>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_u8(x.0)
}
ty::Uint(UintTy::U16) => {
let x = unsafe { do_native_call::<u16>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_u16(x.0)
}
ty::Uint(UintTy::U32) => {
let x = unsafe { do_native_call::<u32>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_u32(x.0)
}
ty::Uint(UintTy::U64) => {
let x = unsafe { do_native_call::<u64>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_u64(x.0)
}
ty::Uint(UintTy::Usize) => {
let x = unsafe { do_native_call::<usize>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
Scalar::from_target_usize(x.0.try_into().unwrap(), this)
}
// Functions with no declared return type (i.e., the default return)
// have the output_type `Tuple([])`.
ty::Tuple(t_list) if (*t_list).deref().is_empty() => {
let (_, mm) = unsafe { do_native_call::<()>(ptr, libffi_args.as_slice(), alloc) };
return interp_ok((ImmTy::uninit(dest.layout), mm));
}
ty::RawPtr(..) => {
let x = unsafe { do_native_call::<*const ()>(ptr, libffi_args.as_slice(), alloc) };
maybe_memevents = x.1;
let ptr = Pointer::new(Provenance::Wildcard, Size::from_bytes(x.0.addr()));
Scalar::from_pointer(ptr, this)
}
_ => throw_unsup_format!("unsupported return type for native call: {:?}", link_name),
let res = 'res: {
let scalar = match dest.layout.ty.kind() {
// ints
ty::Int(IntTy::I8) => {
// Unsafe because of the call to native code.
// Because this is calling a C function it is not necessarily sound,
// but there is no way around this and we've checked as much as we can.
let x = unsafe { ffi::call::<i8>(ptr, libffi_args.as_slice()) };
Scalar::from_i8(x)
}
ty::Int(IntTy::I16) => {
let x = unsafe { ffi::call::<i16>(ptr, libffi_args.as_slice()) };
Scalar::from_i16(x)
}
ty::Int(IntTy::I32) => {
let x = unsafe { ffi::call::<i32>(ptr, libffi_args.as_slice()) };
Scalar::from_i32(x)
}
ty::Int(IntTy::I64) => {
let x = unsafe { ffi::call::<i64>(ptr, libffi_args.as_slice()) };
Scalar::from_i64(x)
}
ty::Int(IntTy::Isize) => {
let x = unsafe { ffi::call::<isize>(ptr, libffi_args.as_slice()) };
Scalar::from_target_isize(x.try_into().unwrap(), this)
}
// uints
ty::Uint(UintTy::U8) => {
let x = unsafe { ffi::call::<u8>(ptr, libffi_args.as_slice()) };
Scalar::from_u8(x)
}
ty::Uint(UintTy::U16) => {
let x = unsafe { ffi::call::<u16>(ptr, libffi_args.as_slice()) };
Scalar::from_u16(x)
}
ty::Uint(UintTy::U32) => {
let x = unsafe { ffi::call::<u32>(ptr, libffi_args.as_slice()) };
Scalar::from_u32(x)
}
ty::Uint(UintTy::U64) => {
let x = unsafe { ffi::call::<u64>(ptr, libffi_args.as_slice()) };
Scalar::from_u64(x)
}
ty::Uint(UintTy::Usize) => {
let x = unsafe { ffi::call::<usize>(ptr, libffi_args.as_slice()) };
Scalar::from_target_usize(x.try_into().unwrap(), this)
}
// Functions with no declared return type (i.e., the default return)
// have the output_type `Tuple([])`.
ty::Tuple(t_list) if (*t_list).deref().is_empty() => {
unsafe { ffi::call::<()>(ptr, libffi_args.as_slice()) };
break 'res interp_ok(ImmTy::uninit(dest.layout));
}
ty::RawPtr(..) => {
let x = unsafe { ffi::call::<*const ()>(ptr, libffi_args.as_slice()) };
let ptr = Pointer::new(Provenance::Wildcard, Size::from_bytes(x.addr()));
Scalar::from_pointer(ptr, this)
}
_ =>
break 'res Err(err_unsup_format!(
"unsupported return type for native call: {:?}",
link_name
))
.into(),
};
interp_ok(ImmTy::from_scalar(scalar, dest.layout))
};
interp_ok((ImmTy::from_scalar(scalar, dest.layout), maybe_memevents))
// SAFETY: We got the guard and stack pointer from start_ffi, and
// the allocator is the same
#[cfg(target_os = "linux")]
let events = unsafe { Supervisor::end_ffi(alloc, guard, stack_ptr) };
#[cfg(not(target_os = "linux"))]
let events = None;
interp_ok((res?, events))
}
/// Get the pointer to the function of the specified name in the shared object file,
@ -205,14 +212,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// The first time this happens, print a warning.
if !this.machine.native_call_mem_warned.replace(true) {
// Newly set, so first time we get here.
#[cfg(target_os = "linux")]
if shims::trace::Supervisor::poll() {
this.emit_diagnostic(NonHaltingDiagnostic::NativeCallSharedMem);
} else {
this.emit_diagnostic(NonHaltingDiagnostic::NativeCallNoTrace);
}
#[cfg(not(target_os = "linux"))]
this.emit_diagnostic(NonHaltingDiagnostic::NativeCallSharedMem);
this.emit_diagnostic(NonHaltingDiagnostic::NativeCallSharedMem {
#[cfg(target_os = "linux")]
tracing: self::trace::Supervisor::is_enabled(),
#[cfg(not(target_os = "linux"))]
tracing: false,
});
}
this.expose_provenance(prov)?;
@ -243,48 +248,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
/// Performs the actual native call, returning the result and the events that
/// the supervisor detected (if any).
///
/// SAFETY: See `libffi::fii::call`.
#[cfg(target_os = "linux")]
unsafe fn do_native_call<T: libffi::high::CType>(
ptr: CodePtr,
args: &[ffi::Arg<'_>],
alloc: Option<Rc<RefCell<IsolatedAlloc>>>,
) -> (T, Option<shims::trace::messages::MemEvents>) {
use shims::trace::Supervisor;
unsafe {
if let Some(alloc) = alloc {
// SAFETY: We don't touch the machine memory past this point.
let (guard, stack_ptr) = Supervisor::start_ffi(alloc.clone());
// SAFETY: Upheld by caller.
let ret = ffi::call(ptr, args);
// SAFETY: We got the guard and stack pointer from start_ffi, and
// the allocator is the same.
(ret, Supervisor::end_ffi(guard, alloc, stack_ptr))
} else {
// SAFETY: Upheld by caller.
(ffi::call(ptr, args), None)
}
}
}
/// Performs the actual native call, returning the result and a `None`.
/// Placeholder for platforms that do not support the ptrace supervisor.
///
/// SAFETY: See `libffi::fii::call`.
#[cfg(not(target_os = "linux"))]
#[inline(always)]
unsafe fn do_native_call<T: libffi::high::CType>(
ptr: CodePtr,
args: &[ffi::Arg<'_>],
_alloc: (),
) -> (T, Option<!>) {
(unsafe { ffi::call(ptr, args) }, None)
}
#[derive(Debug, Clone)]
/// Enum of supported arguments to external C functions.
// We introduce this enum instead of just calling `ffi::arg` and storing a list

View file

@ -30,7 +30,7 @@ pub struct SvInitError;
impl Supervisor {
/// Returns `true` if the supervisor process exists, and `false` otherwise.
pub fn poll() -> bool {
pub fn is_enabled() -> bool {
SUPERVISOR.lock().unwrap().is_some()
}
@ -45,7 +45,7 @@ impl Supervisor {
/// SAFETY: The resulting guard must be dropped *via `end_ffi`* immediately
/// after the desired call has concluded.
pub unsafe fn start_ffi(
alloc: Rc<RefCell<IsolatedAlloc>>,
alloc: &Rc<RefCell<IsolatedAlloc>>,
) -> (std::sync::MutexGuard<'static, Option<Supervisor>>, Option<*mut [u8; FAKE_STACK_SIZE]>)
{
let mut sv_guard = SUPERVISOR.lock().unwrap();
@ -99,8 +99,8 @@ impl Supervisor {
/// received by a prior call to `start_ffi`, and the allocator must be the
/// one passed to it also.
pub unsafe fn end_ffi(
alloc: &Rc<RefCell<IsolatedAlloc>>,
mut sv_guard: std::sync::MutexGuard<'static, Option<Supervisor>>,
alloc: Rc<RefCell<IsolatedAlloc>>,
raw_stack_ptr: Option<*mut [u8; FAKE_STACK_SIZE]>,
) -> Option<MemEvents> {
// We can't use IPC channels here to signal that FFI mode has ended,

View file

@ -4,8 +4,8 @@ use ipc_channel::ipc;
use nix::sys::{ptrace, signal, wait};
use nix::unistd;
use crate::shims::trace::messages::{Confirmation, MemEvents, TraceRequest};
use crate::shims::trace::{AccessEvent, FAKE_STACK_SIZE, StartFfiInfo};
use super::messages::{Confirmation, MemEvents, TraceRequest};
use super::{AccessEvent, FAKE_STACK_SIZE, StartFfiInfo};
/// The flags to use when calling `waitid()`.
/// Since bitwise or on the nix version of these flags is implemented as a trait,
@ -532,10 +532,11 @@ fn handle_segfault(
if ch_pages.iter().any(|pg| (*pg..pg.strict_add(page_size)).contains(&addr)) {
// Overall structure:
// - Get the address that caused the segfault
// - Unprotect the memory
// - Unprotect the memory: we force the child to execute `mempr_off`, passing
// parameters via global atomic variables.
// - Step 1 instruction
// - Parse executed code to estimate size & type of access
// - Reprotect the memory
// - Reprotect the memory by executing `mempr_on` in the child.
// - Continue
// Ensure the stack is properly zeroed out!
@ -606,7 +607,7 @@ fn handle_segfault(
ret
});
// Now figure out the size + type of access and log it down
// Now figure out the size + type of access and log it down.
// This will mark down e.g. the same area being read multiple times,
// since it's more efficient to compress the accesses at the end.
if capstone_disassemble(&instr, addr, cs, acc_events).is_err() {

View file

@ -4,8 +4,8 @@ warning: sharing memory with a native function called via FFI
LL | unsafe { print_pointer(&x) };
| ^^^^^^^^^^^^^^^^^ sharing memory with a native function
|
= help: when memory is shared with a native function call, Miri can only track initialisation and provenance on a best-effort basis
= help: in particular, Miri assumes that the native call initializes all memory it has written to
= help: when memory is shared with a native function call, Miri stops tracking initialization and provenance for that memory
= help: in particular, Miri assumes that the native call initializes all memory it has access to
= help: Miri also assumes that any part of this memory may be a pointer that is permitted to point to arbitrary exposed memory
= help: what this means is that Miri will easily miss Undefined Behavior related to incorrect usage of this shared memory, so you should not take a clean Miri run as a signal that your FFI code is UB-free
= note: BACKTRACE:

View file

@ -4,8 +4,8 @@ warning: sharing memory with a native function called via FFI
LL | unsafe { increment_int(&mut x) };
| ^^^^^^^^^^^^^^^^^^^^^ sharing memory with a native function
|
= help: when memory is shared with a native function call, Miri can only track initialisation and provenance on a best-effort basis
= help: in particular, Miri assumes that the native call initializes all memory it has written to
= help: when memory is shared with a native function call, Miri stops tracking initialization and provenance for that memory
= help: in particular, Miri assumes that the native call initializes all memory it has access to
= help: Miri also assumes that any part of this memory may be a pointer that is permitted to point to arbitrary exposed memory
= help: what this means is that Miri will easily miss Undefined Behavior related to incorrect usage of this shared memory, so you should not take a clean Miri run as a signal that your FFI code is UB-free
= note: BACKTRACE: