Auto merge of #136316 - GrigorenkoPV:generic_atomic, r=Mark-Simulacrum

Create `Atomic<T>` type alias (rebase)

Rebase of #130543.

Additional changes:
- Switch from `allow` to `expect` for `private_bounds` on `AtomicPrimitive`
- Unhide `AtomicPrimitive::AtomicInner` from docs, because rustdoc shows the definition `pub type Atomic<T> = <T as AtomicPrimitive>::AtomicInner;` and generates links for it.
  - `NonZero` did not have this issue, because they kept the new alias private before the direction was changed.
- Use `Atomic<_>` in more places, including inside `Once`'s `Futex`. This is possible thanks to https://github.com/rust-lang/rust-clippy/pull/14125

The rest will either get moved back to #130543 or #130543 will be closed in favor of this instead.

---

* ACP: https://github.com/rust-lang/libs-team/issues/443#event-14293381061
* Tracking issue: #130539
This commit is contained in:
bors 2025-04-28 05:12:59 +00:00
commit 0134651fb8
78 changed files with 370 additions and 265 deletions

View file

@ -122,6 +122,7 @@
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(formatting_options)]
#![feature(generic_atomic)]
#![feature(hasher_prefixfree_extras)]
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]

View file

@ -26,8 +26,8 @@ use core::pin::{Pin, PinCoerceUnsized};
use core::ptr::{self, NonNull};
#[cfg(not(no_global_oom_handling))]
use core::slice::from_raw_parts_mut;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use core::sync::atomic::{self, Atomic};
use core::{borrow, fmt, hint};
#[cfg(not(no_global_oom_handling))]
@ -369,12 +369,12 @@ impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
// inner types.
#[repr(C)]
struct ArcInner<T: ?Sized> {
strong: atomic::AtomicUsize,
strong: Atomic<usize>,
// the value usize::MAX acts as a sentinel for temporarily "locking" the
// ability to upgrade weak pointers or downgrade strong ones; this is used
// to avoid races in `make_mut` and `get_mut`.
weak: atomic::AtomicUsize,
weak: Atomic<usize>,
data: T,
}
@ -2813,8 +2813,8 @@ impl<T, A: Allocator> Weak<T, A> {
/// Helper type to allow accessing the reference counts without
/// making any assertions about the data field.
struct WeakInner<'a> {
weak: &'a atomic::AtomicUsize,
strong: &'a atomic::AtomicUsize,
weak: &'a Atomic<usize>,
strong: &'a Atomic<usize>,
}
impl<T: ?Sized> Weak<T> {

View file

@ -247,6 +247,100 @@ use crate::cell::UnsafeCell;
use crate::hint::spin_loop;
use crate::{fmt, intrinsics};
// Private sealing trait: `AtomicPrimitive` lists `Sealed` as a supertrait, so
// only types in this module can implement it — downstream crates cannot.
trait Sealed {}

/// A marker trait for primitive types which can be modified atomically.
///
/// This is an implementation detail for <code>[Atomic]\<T></code> which may disappear or be replaced at any time.
///
/// # Safety
///
/// Types implementing this trait must be primitives that can be modified atomically.
///
/// The associated `Self::AtomicInner` type must have the same size and bit validity as `Self`,
/// but may have a higher alignment requirement, so the following `transmute`s are sound:
///
/// - `&mut Self::AtomicInner` as `&mut Self`
/// - `Self` as `Self::AtomicInner` or the reverse
#[unstable(
    feature = "atomic_internals",
    reason = "implementation detail which may disappear or be replaced at any time",
    issue = "none"
)]
// The bound on the private `Sealed` trait intentionally leaks into this public
// trait's signature; `expect` (rather than `allow`) makes the lint fire again
// if the bound ever stops triggering it.
#[expect(private_bounds)]
pub unsafe trait AtomicPrimitive: Sized + Copy + Sealed {
    /// Temporary implementation detail.
    type AtomicInner: Sized;
}
// Implements `Sealed` and `AtomicPrimitive` for one primitive type, mapping it
// to its dedicated atomic container (e.g. `u8` -> `AtomicU8`, `*mut T` ->
// `AtomicPtr<T>`). The optional `<$T>` parameter covers the generic
// `AtomicPtr<T>` case.
//
// `$size` feeds the `target_has_atomic_load_store` cfg gate below, so the impl
// only exists on targets that support atomic loads/stores of that width.
// NOTE(review): `$align` is accepted but not used anywhere in this expansion —
// presumably kept for documentation or future use; confirm.
macro impl_atomic_primitive(
    $Atom:ident $(<$T:ident>)? ($Primitive:ty),
    size($size:literal),
    align($align:literal) $(,)?
) {
    impl $(<$T>)? Sealed for $Primitive {}

    #[unstable(
        feature = "atomic_internals",
        reason = "implementation detail which may disappear or be replaced at any time",
        issue = "none"
    )]
    // Only provide the mapping when the target can actually perform atomic
    // loads and stores at this width.
    #[cfg(target_has_atomic_load_store = $size)]
    unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
        type AtomicInner = $Atom $(<$T>)?;
    }
}
// Fixed-width primitives: each maps to its same-width atomic container. The
// `size("...")` argument gates each impl on the corresponding
// `target_has_atomic_load_store` capability inside the macro.
impl_atomic_primitive!(AtomicBool(bool), size("8"), align(1));
impl_atomic_primitive!(AtomicI8(i8), size("8"), align(1));
impl_atomic_primitive!(AtomicU8(u8), size("8"), align(1));
impl_atomic_primitive!(AtomicI16(i16), size("16"), align(2));
impl_atomic_primitive!(AtomicU16(u16), size("16"), align(2));
impl_atomic_primitive!(AtomicI32(i32), size("32"), align(4));
impl_atomic_primitive!(AtomicU32(u32), size("32"), align(4));
impl_atomic_primitive!(AtomicI64(i64), size("64"), align(8));
impl_atomic_primitive!(AtomicU64(u64), size("64"), align(8));
impl_atomic_primitive!(AtomicI128(i128), size("128"), align(16));
impl_atomic_primitive!(AtomicU128(u128), size("128"), align(16));

// Pointer-sized primitives (`isize`, `usize`, `*mut T`): the `align` argument
// depends on the pointer width, so each gets one invocation per
// `target_pointer_width` value, all gated on `size("ptr")`.
#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(8));

#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(8));

#[cfg(target_pointer_width = "16")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(8));
/// A memory location which can be safely modified from multiple threads.
///
/// This has the same size and bit validity as the underlying type `T`. However,
/// the alignment of this type is always equal to its size, even on targets where
/// `T` has alignment less than its size.
///
/// For more about the differences between atomic types and non-atomic types as
/// well as information about the portability of this type, please see the
/// [module-level documentation].
///
/// **Note:** This type is only available on platforms that support atomic loads
/// and stores of `T`.
///
/// [module-level documentation]: crate::sync::atomic
#[unstable(feature = "generic_atomic", issue = "130539")]
// Resolves through the `AtomicPrimitive` impls above, e.g. `Atomic<u8>` is
// `AtomicU8` and `Atomic<*mut T>` is `AtomicPtr<T>`.
pub type Atomic<T> = <T as AtomicPrimitive>::AtomicInner;
// Some architectures don't have byte-sized atomics, which results in LLVM
// emulating them using a LL/SC loop. However for AtomicBool we can take
// advantage of the fact that it only ever contains 0 or 1 and use atomic OR/AND

View file

@ -57,7 +57,7 @@
#![stable(feature = "alloc_module", since = "1.28.0")]
use core::ptr::NonNull;
use core::sync::atomic::{AtomicPtr, Ordering};
use core::sync::atomic::{Atomic, AtomicPtr, Ordering};
use core::{hint, mem, ptr};
#[stable(feature = "alloc_module", since = "1.28.0")]
@ -287,7 +287,7 @@ unsafe impl Allocator for System {
}
}
static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
static HOOK: Atomic<*mut ()> = AtomicPtr::new(ptr::null_mut());
/// Registers a custom allocation error hook, replacing any that was previously registered.
///

View file

@ -92,8 +92,8 @@ use crate::backtrace_rs::{self, BytesOrWideString};
use crate::ffi::c_void;
use crate::panic::UnwindSafe;
use crate::sync::LazyLock;
use crate::sync::atomic::AtomicU8;
use crate::sync::atomic::Ordering::Relaxed;
use crate::sync::atomic::{Atomic, AtomicU8};
use crate::sys::backtrace::{lock, output_filename, set_image_base};
use crate::{env, fmt};
@ -254,7 +254,7 @@ impl Backtrace {
// Cache the result of reading the environment variables to make
// backtrace captures speedy, because otherwise reading environment
// variables every time can be somewhat slow.
static ENABLED: AtomicU8 = AtomicU8::new(0);
static ENABLED: Atomic<u8> = AtomicU8::new(0);
match ENABLED.load(Relaxed) {
0 => {}
1 => return false,

View file

@ -11,7 +11,7 @@ use crate::io::{
self, BorrowedCursor, BufReader, IoSlice, IoSliceMut, LineWriter, Lines, SpecReadByte,
};
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantLock, ReentrantLockGuard};
use crate::sys::stdio;
use crate::thread::AccessError;
@ -37,7 +37,7 @@ thread_local! {
/// have a consistent order between set_output_capture and print_to *within
/// the same thread*. Within the same thread, things always have a perfectly
/// consistent order. So Ordering::Relaxed is fine.
static OUTPUT_CAPTURE_USED: AtomicBool = AtomicBool::new(false);
static OUTPUT_CAPTURE_USED: Atomic<bool> = AtomicBool::new(false);
/// A handle to a raw instance of the standard input stream of this process.
///

View file

@ -343,6 +343,7 @@
#![feature(float_gamma)]
#![feature(float_minimum_maximum)]
#![feature(fmt_internals)]
#![feature(generic_atomic)]
#![feature(hasher_prefixfree_extras)]
#![feature(hashmap_internals)]
#![feature(hint_must_use)]

View file

@ -4,13 +4,13 @@
use crate::ffi::c_void;
use crate::ptr::NonNull;
use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, Ordering};
static SYSTEM_TABLE: AtomicPtr<c_void> = AtomicPtr::new(crate::ptr::null_mut());
static IMAGE_HANDLE: AtomicPtr<c_void> = AtomicPtr::new(crate::ptr::null_mut());
static SYSTEM_TABLE: Atomic<*mut c_void> = AtomicPtr::new(crate::ptr::null_mut());
static IMAGE_HANDLE: Atomic<*mut c_void> = AtomicPtr::new(crate::ptr::null_mut());
// Flag to check if BootServices are still valid.
// Start with assuming that they are not available
static BOOT_SERVICES_FLAG: AtomicBool = AtomicBool::new(false);
static BOOT_SERVICES_FLAG: Atomic<bool> = AtomicBool::new(false);
/// Initializes the global System Table and Image Handle pointers.
///

View file

@ -1,4 +1,4 @@
use core::sync::atomic::{AtomicU32, Ordering};
use core::sync::atomic::{Atomic, AtomicU32, Ordering};
use crate::os::xous::ffi::Connection;
@ -106,7 +106,7 @@ pub fn try_connect(name: &str) -> Option<Connection> {
ns::try_connect_with_name(name)
}
static NAME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
static NAME_SERVER_CONNECTION: Atomic<u32> = AtomicU32::new(0);
/// Returns a `Connection` to the name server. If the name server has not been started,
/// then this call will block until the name server has been started. The `Connection`

View file

@ -1,4 +1,4 @@
use core::sync::atomic::{AtomicU32, Ordering};
use core::sync::atomic::{Atomic, AtomicU32, Ordering};
use crate::os::xous::ffi::Connection;
use crate::os::xous::services::connect;
@ -17,7 +17,7 @@ impl Into<usize> for DnsLendMut {
/// Returns a `Connection` to the DNS lookup server. This server is used for
/// querying domain name values.
pub(crate) fn dns_server() -> Connection {
static DNS_CONNECTION: AtomicU32 = AtomicU32::new(0);
static DNS_CONNECTION: Atomic<u32> = AtomicU32::new(0);
let cid = DNS_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {
return cid.into();

View file

@ -1,4 +1,4 @@
use core::sync::atomic::{AtomicU32, Ordering};
use core::sync::atomic::{Atomic, AtomicU32, Ordering};
use crate::os::xous::ffi::Connection;
@ -64,7 +64,7 @@ impl Into<usize> for LogLend {
/// running. It is safe to call this multiple times, because the address is
/// shared among all threads in a process.
pub(crate) fn log_server() -> Connection {
static LOG_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
static LOG_SERVER_CONNECTION: Atomic<u32> = AtomicU32::new(0);
let cid = LOG_SERVER_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {

View file

@ -1,4 +1,4 @@
use core::sync::atomic::{AtomicU32, Ordering};
use core::sync::atomic::{Atomic, AtomicU32, Ordering};
use crate::os::xous::ffi::Connection;
use crate::os::xous::services::connect;
@ -84,7 +84,7 @@ impl<'a> Into<[usize; 5]> for NetBlockingScalar {
/// Returns a `Connection` to the Network server. This server provides all
/// OS-level networking functions.
pub(crate) fn net_server() -> Connection {
static NET_CONNECTION: AtomicU32 = AtomicU32::new(0);
static NET_CONNECTION: Atomic<u32> = AtomicU32::new(0);
let cid = NET_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {
return cid.into();

View file

@ -1,4 +1,4 @@
use core::sync::atomic::{AtomicU32, Ordering};
use core::sync::atomic::{Atomic, AtomicU32, Ordering};
use crate::os::xous::ffi::{Connection, connect};
@ -17,7 +17,7 @@ impl Into<[usize; 5]> for SystimeScalar {
/// Returns a `Connection` to the systime server. This server is used for reporting the
/// realtime clock.
pub(crate) fn systime_server() -> Connection {
static SYSTIME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
static SYSTIME_SERVER_CONNECTION: Atomic<u32> = AtomicU32::new(0);
let cid = SYSTIME_SERVER_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {
return cid.into();

View file

@ -1,4 +1,4 @@
use core::sync::atomic::{AtomicU32, Ordering};
use core::sync::atomic::{Atomic, AtomicU32, Ordering};
use crate::os::xous::ffi::Connection;
@ -31,7 +31,7 @@ impl Into<[usize; 5]> for TicktimerScalar {
/// Returns a `Connection` to the ticktimer server. This server is used for synchronization
/// primitives such as sleep, Mutex, and Condvar.
pub(crate) fn ticktimer_server() -> Connection {
static TICKTIMER_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0);
static TICKTIMER_SERVER_CONNECTION: Atomic<u32> = AtomicU32::new(0);
let cid = TICKTIMER_SERVER_CONNECTION.load(Ordering::Relaxed);
if cid != 0 {
return cid.into();

View file

@ -3,7 +3,7 @@
#![stable(feature = "std_panic", since = "1.9.0")]
use crate::any::Any;
use crate::sync::atomic::{AtomicU8, Ordering};
use crate::sync::atomic::{Atomic, AtomicU8, Ordering};
use crate::sync::{Condvar, Mutex, RwLock};
use crate::thread::Result;
use crate::{collections, fmt, panicking};
@ -469,7 +469,7 @@ impl BacktraceStyle {
// that backtrace.
//
// Internally stores equivalent of an Option<BacktraceStyle>.
static SHOULD_CAPTURE: AtomicU8 = AtomicU8::new(0);
static SHOULD_CAPTURE: Atomic<u8> = AtomicU8::new(0);
/// Configures whether the default panic hook will capture and display a
/// backtrace.

View file

@ -21,7 +21,7 @@ use crate::any::Any;
use crate::io::try_set_output_capture;
use crate::mem::{self, ManuallyDrop};
use crate::panic::{BacktraceStyle, PanicHookInfo};
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
use crate::sync::{PoisonError, RwLock};
use crate::sys::backtrace;
use crate::sys::stdio::panic_output;
@ -289,7 +289,7 @@ fn default_hook(info: &PanicHookInfo<'_>) {
};
});
static FIRST_PANIC: AtomicBool = AtomicBool::new(true);
static FIRST_PANIC: Atomic<bool> = AtomicBool::new(true);
match backtrace {
// SAFETY: we took out a lock just a second ago.
@ -374,7 +374,7 @@ pub mod panic_count {
#[unstable(feature = "update_panic_count", issue = "none")]
pub mod panic_count {
use crate::cell::Cell;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
const ALWAYS_ABORT_FLAG: usize = 1 << (usize::BITS - 1);
@ -416,7 +416,7 @@ pub mod panic_count {
//
// Stealing a bit is fine because it just amounts to assuming that each
// panicking thread consumes at least 2 bytes of address space.
static GLOBAL_PANIC_COUNT: AtomicUsize = AtomicUsize::new(0);
static GLOBAL_PANIC_COUNT: Atomic<usize> = AtomicUsize::new(0);
// Increases the global and local panic count, and returns whether an
// immediate abort is required.

View file

@ -16,13 +16,13 @@ use super::waker::SyncWaker;
use crate::cell::UnsafeCell;
use crate::mem::MaybeUninit;
use crate::ptr;
use crate::sync::atomic::{self, AtomicUsize, Ordering};
use crate::sync::atomic::{self, Atomic, AtomicUsize, Ordering};
use crate::time::Instant;
/// A slot in a channel.
struct Slot<T> {
/// The current stamp.
stamp: AtomicUsize,
stamp: Atomic<usize>,
/// The message in this slot. Either read out in `read` or dropped through
/// `discard_all_messages`.
@ -55,7 +55,7 @@ pub(crate) struct Channel<T> {
/// represent the lap. The mark bit in the head is always zero.
///
/// Messages are popped from the head of the channel.
head: CachePadded<AtomicUsize>,
head: CachePadded<Atomic<usize>>,
/// The tail of the channel.
///
@ -64,7 +64,7 @@ pub(crate) struct Channel<T> {
/// represent the lap. The mark bit indicates that the channel is disconnected.
///
/// Messages are pushed into the tail of the channel.
tail: CachePadded<AtomicUsize>,
tail: CachePadded<Atomic<usize>>,
/// The buffer holding slots.
buffer: Box<[Slot<T>]>,

View file

@ -5,7 +5,7 @@ use super::waker::current_thread_id;
use crate::cell::Cell;
use crate::ptr;
use crate::sync::Arc;
use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicPtr, AtomicUsize, Ordering};
use crate::thread::{self, Thread};
use crate::time::Instant;
@ -19,10 +19,10 @@ pub struct Context {
#[derive(Debug)]
struct Inner {
/// Selected operation.
select: AtomicUsize,
select: Atomic<usize>,
/// A slot into which another thread may store a pointer to its `Packet`.
packet: AtomicPtr<()>,
packet: Atomic<*mut ()>,
/// Thread handle.
thread: Thread,

View file

@ -1,16 +1,16 @@
use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize, Ordering};
use crate::{ops, process};
/// Reference counter internals.
struct Counter<C> {
/// The number of senders associated with the channel.
senders: AtomicUsize,
senders: Atomic<usize>,
/// The number of receivers associated with the channel.
receivers: AtomicUsize,
receivers: Atomic<usize>,
/// Set to `true` if the last sender or the last receiver reference deallocates the channel.
destroy: AtomicBool,
destroy: Atomic<bool>,
/// The internal channel.
chan: C,

View file

@ -9,7 +9,7 @@ use crate::cell::UnsafeCell;
use crate::marker::PhantomData;
use crate::mem::MaybeUninit;
use crate::ptr;
use crate::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use crate::sync::atomic::{self, Atomic, AtomicPtr, AtomicUsize, Ordering};
use crate::time::Instant;
// Bits indicating the state of a slot:
@ -37,7 +37,7 @@ struct Slot<T> {
msg: UnsafeCell<MaybeUninit<T>>,
/// The state of the slot.
state: AtomicUsize,
state: Atomic<usize>,
}
impl<T> Slot<T> {
@ -55,7 +55,7 @@ impl<T> Slot<T> {
/// Each block in the list can hold up to `BLOCK_CAP` messages.
struct Block<T> {
/// The next block in the linked list.
next: AtomicPtr<Block<T>>,
next: Atomic<*mut Block<T>>,
/// Slots for messages.
slots: [Slot<T>; BLOCK_CAP],
@ -65,11 +65,11 @@ impl<T> Block<T> {
/// Creates an empty block.
fn new() -> Box<Block<T>> {
// SAFETY: This is safe because:
// [1] `Block::next` (AtomicPtr) may be safely zero initialized.
// [1] `Block::next` (Atomic<*mut _>) may be safely zero initialized.
// [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4].
// [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it
// holds a MaybeUninit.
// [4] `Slot::state` (AtomicUsize) may be safely zero initialized.
// [4] `Slot::state` (Atomic<usize>) may be safely zero initialized.
unsafe { Box::new_zeroed().assume_init() }
}
@ -110,10 +110,10 @@ impl<T> Block<T> {
#[derive(Debug)]
struct Position<T> {
/// The index in the channel.
index: AtomicUsize,
index: Atomic<usize>,
/// The block in the linked list.
block: AtomicPtr<Block<T>>,
block: Atomic<*mut Block<T>>,
}
/// The token type for the list flavor.

View file

@ -4,7 +4,7 @@ use super::context::Context;
use super::select::{Operation, Selected};
use crate::ptr;
use crate::sync::Mutex;
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
/// Represents a thread blocked on a specific channel operation.
pub(crate) struct Entry {
@ -137,7 +137,7 @@ pub(crate) struct SyncWaker {
inner: Mutex<Waker>,
/// `true` if the waker is empty.
is_empty: AtomicBool,
is_empty: Atomic<bool>,
}
impl SyncWaker {

View file

@ -10,7 +10,7 @@ use super::waker::Waker;
use crate::cell::UnsafeCell;
use crate::marker::PhantomData;
use crate::sync::Mutex;
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
use crate::time::Instant;
use crate::{fmt, ptr};
@ -35,7 +35,7 @@ struct Packet<T> {
on_stack: bool,
/// Equals `true` once the packet is ready for reading or writing.
ready: AtomicBool,
ready: Atomic<bool>,
/// The message.
msg: UnsafeCell<Option<T>>,

View file

@ -76,7 +76,7 @@ pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use crate::error::Error;
use crate::fmt;
#[cfg(panic = "unwind")]
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
#[cfg(panic = "unwind")]
use crate::thread;
@ -88,7 +88,7 @@ mod rwlock;
pub(crate) struct Flag {
#[cfg(panic = "unwind")]
failed: AtomicBool,
failed: Atomic<bool>,
}
// Note that the Ordering uses to access the `failed` field of `Flag` below is

View file

@ -89,9 +89,9 @@ pub struct ReentrantLock<T: ?Sized> {
cfg_if!(
if #[cfg(target_has_atomic = "64")] {
use crate::sync::atomic::{AtomicU64, Ordering::Relaxed};
use crate::sync::atomic::{Atomic, AtomicU64, Ordering::Relaxed};
struct Tid(AtomicU64);
struct Tid(Atomic<u64>);
impl Tid {
const fn new() -> Self {
@ -120,6 +120,7 @@ cfg_if!(
}
use crate::sync::atomic::{
Atomic,
AtomicUsize,
Ordering,
};
@ -137,7 +138,7 @@ cfg_if!(
// the current thread, or by a thread that has terminated before
// the current thread was created. In either case, no further
// synchronization is needed (as per <https://github.com/rust-lang/miri/issues/3450>)
tls_addr: AtomicUsize,
tls_addr: Atomic<usize>,
tid: UnsafeCell<u64>,
}

View file

@ -1,6 +1,6 @@
use crate::alloc::{GlobalAlloc, Layout, System};
use crate::ptr;
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
use crate::sys::pal::abi::mem as sgx_mem;
use crate::sys::pal::waitqueue::SpinMutex;
@ -22,7 +22,7 @@ struct Sgx;
unsafe impl dlmalloc::Allocator for Sgx {
/// Allocs system resources
fn alloc(&self, _size: usize) -> (*mut u8, usize, u32) {
static INIT: AtomicBool = AtomicBool::new(false);
static INIT: Atomic<bool> = AtomicBool::new(false);
// No ordering requirement since this function is protected by the global lock.
if !INIT.swap(true, Ordering::Relaxed) {

View file

@ -60,10 +60,10 @@ unsafe impl GlobalAlloc for System {
#[cfg(target_feature = "atomics")]
mod lock {
use crate::sync::atomic::AtomicI32;
use crate::sync::atomic::Ordering::{Acquire, Release};
use crate::sync::atomic::{Atomic, AtomicI32};
static LOCKED: AtomicI32 = AtomicI32::new(0);
static LOCKED: Atomic<i32> = AtomicI32::new(0);
pub struct DropLock;

View file

@ -49,10 +49,10 @@ unsafe impl GlobalAlloc for System {
}
mod lock {
use crate::sync::atomic::AtomicI32;
use crate::sync::atomic::Ordering::{Acquire, Release};
use crate::sync::atomic::{Atomic, AtomicI32};
static LOCKED: AtomicI32 = AtomicI32::new(0);
static LOCKED: Atomic<i32> = AtomicI32::new(0);
pub struct DropLock;

View file

@ -1,7 +1,7 @@
#![allow(fuzzy_provenance_casts)] // FIXME: this module systematically confuses pointers and integers
use crate::ffi::OsString;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
use crate::sys::os_str::Buf;
use crate::sys::pal::abi::usercalls::alloc;
use crate::sys::pal::abi::usercalls::raw::ByteBuffer;
@ -11,7 +11,7 @@ use crate::{fmt, slice};
// Specifying linkage/symbol name is solely to ensure a single instance between this crate and its unit tests
#[cfg_attr(test, linkage = "available_externally")]
#[unsafe(export_name = "_ZN16__rust_internals3std3sys3sgx4args4ARGSE")]
static ARGS: AtomicUsize = AtomicUsize::new(0);
static ARGS: Atomic<usize> = AtomicUsize::new(0);
type ArgsStore = Vec<OsString>;
#[cfg_attr(test, allow(dead_code))]

View file

@ -88,7 +88,7 @@ pub fn args() -> Args {
mod imp {
use crate::ffi::c_char;
use crate::ptr;
use crate::sync::atomic::{AtomicIsize, AtomicPtr, Ordering};
use crate::sync::atomic::{Atomic, AtomicIsize, AtomicPtr, Ordering};
// The system-provided argc and argv, which we store in static memory
// here so that we can defer the work of parsing them until its actually
@ -96,8 +96,8 @@ mod imp {
//
// Note that we never mutate argv/argc, the argv array, or the argv
// strings, which allows the code in this file to be very simple.
static ARGC: AtomicIsize = AtomicIsize::new(0);
static ARGV: AtomicPtr<*const u8> = AtomicPtr::new(ptr::null_mut());
static ARGC: Atomic<isize> = AtomicIsize::new(0);
static ARGV: Atomic<*mut *const u8> = AtomicPtr::new(ptr::null_mut());
unsafe fn really_init(argc: isize, argv: *const *const u8) {
// These don't need to be ordered with each other or other stores,

View file

@ -4,13 +4,13 @@ pub use super::common::Env;
use crate::collections::HashMap;
use crate::ffi::{OsStr, OsString};
use crate::io;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
use crate::sync::{Mutex, Once};
// Specifying linkage/symbol name is solely to ensure a single instance between this crate and its unit tests
#[cfg_attr(test, linkage = "available_externally")]
#[unsafe(export_name = "_ZN16__rust_internals3std3sys3pal3sgx2os3ENVE")]
static ENV: AtomicUsize = AtomicUsize::new(0);
static ENV: Atomic<usize> = AtomicUsize::new(0);
// Specifying linkage/symbol name is solely to ensure a single instance between this crate and its unit tests
#[cfg_attr(test, linkage = "available_externally")]
#[unsafe(export_name = "_ZN16__rust_internals3std3sys3pal3sgx2os8ENV_INITE")]

View file

@ -2,11 +2,11 @@ pub use super::common::Env;
use crate::collections::HashMap;
use crate::ffi::{OsStr, OsString};
use crate::io;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
use crate::sync::{Mutex, Once};
use crate::sys::pal::os::{get_application_parameters, params};
static ENV: AtomicUsize = AtomicUsize::new(0);
static ENV: Atomic<usize> = AtomicUsize::new(0);
static ENV_INIT: Once = Once::new();
type EnvStore = Mutex<HashMap<OsString, OsString>>;

View file

@ -147,14 +147,14 @@ cfg_has_statx! {{
flags: i32,
mask: u32,
) -> Option<io::Result<FileAttr>> {
use crate::sync::atomic::{AtomicU8, Ordering};
use crate::sync::atomic::{Atomic, AtomicU8, Ordering};
// Linux kernel prior to 4.11 or glibc prior to glibc 2.28 don't support `statx`.
// We check for it on first failure and remember availability to avoid having to
// do it again.
#[repr(u8)]
enum STATX_STATE{ Unknown = 0, Present, Unavailable }
static STATX_SAVED_STATE: AtomicU8 = AtomicU8::new(STATX_STATE::Unknown as u8);
static STATX_SAVED_STATE: Atomic<u8> = AtomicU8::new(STATX_STATE::Unknown as u8);
syscall!(
fn statx(

View file

@ -29,7 +29,7 @@
//! race but we do make a best effort such that it *should* do so.
use core::ptr;
use core::sync::atomic::{AtomicU32, Ordering};
use core::sync::atomic::{Atomic, AtomicU32, Ordering};
use super::{AsRawHandle, DirBuff, File, FromRawHandle};
use crate::sys::c;
@ -87,7 +87,7 @@ fn open_link_no_reparse(
// The `OBJ_DONT_REPARSE` attribute ensures that we haven't been
// tricked into following a symlink. However, it may not be available in
// earlier versions of Windows.
static ATTRIBUTES: AtomicU32 = AtomicU32::new(c::OBJ_DONT_REPARSE);
static ATTRIBUTES: Atomic<u32> = AtomicU32::new(c::OBJ_DONT_REPARSE);
let result = unsafe {
let mut path_str = c::UNICODE_STRING::from_ref(path);

View file

@ -1,5 +1,5 @@
use core::convert::TryInto;
use core::sync::atomic::{AtomicBool, AtomicU16, AtomicUsize, Ordering};
use core::sync::atomic::{Atomic, AtomicBool, AtomicU16, AtomicUsize, Ordering};
use super::*;
use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
@ -18,10 +18,10 @@ macro_rules! unimpl {
#[derive(Clone)]
pub struct TcpListener {
fd: Arc<AtomicU16>,
fd: Arc<Atomic<u16>>,
local: SocketAddr,
handle_count: Arc<AtomicUsize>,
nonblocking: Arc<AtomicBool>,
handle_count: Arc<Atomic<usize>>,
nonblocking: Arc<Atomic<bool>>,
}
impl TcpListener {

View file

@ -1,4 +1,4 @@
use core::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering};
use core::sync::atomic::{Atomic, AtomicBool, AtomicU32, AtomicUsize, Ordering};
use super::*;
use crate::fmt;
@ -29,11 +29,11 @@ pub struct TcpStream {
remote_port: u16,
peer_addr: SocketAddr,
// milliseconds
read_timeout: Arc<AtomicU32>,
read_timeout: Arc<Atomic<u32>>,
// milliseconds
write_timeout: Arc<AtomicU32>,
handle_count: Arc<AtomicUsize>,
nonblocking: Arc<AtomicBool>,
write_timeout: Arc<Atomic<u32>>,
handle_count: Arc<Atomic<usize>>,
nonblocking: Arc<Atomic<bool>>,
}
fn sockaddr_to_buf(duration: Duration, addr: &SocketAddr, buf: &mut [u8]) {

View file

@ -1,5 +1,5 @@
use core::convert::TryInto;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::sync::atomic::{Atomic, AtomicUsize, Ordering};
use super::*;
use crate::cell::Cell;
@ -27,7 +27,7 @@ pub struct UdpSocket {
read_timeout: Cell<u64>,
// in milliseconds. The setting applies only to `send` calls after the timeout is set.
write_timeout: Cell<u64>,
handle_count: Arc<AtomicUsize>,
handle_count: Arc<Atomic<usize>>,
nonblocking: Cell<bool>,
}

View file

@ -1,19 +1,19 @@
use super::hermit_abi;
use crate::ptr::null;
use crate::sync::atomic::AtomicU32;
use crate::sync::atomic::Atomic;
use crate::time::Duration;
/// An atomic for use as a futex that is at least 32-bits but may be larger
pub type Futex = AtomicU32;
pub type Futex = Atomic<Primitive>;
/// Must be the underlying type of Futex
pub type Primitive = u32;
/// An atomic for use as a futex that is at least 8-bits but may be larger.
pub type SmallFutex = AtomicU32;
pub type SmallFutex = Atomic<SmallPrimitive>;
/// Must be the underlying type of SmallFutex
pub type SmallPrimitive = u32;
pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
pub fn futex_wait(futex: &Atomic<u32>, expected: u32, timeout: Option<Duration>) -> bool {
// Calculate the timeout as a relative timespec.
//
// Overflows are rounded up to an infinite timeout (None).
@ -37,12 +37,12 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
}
#[inline]
pub fn futex_wake(futex: &AtomicU32) -> bool {
pub fn futex_wake(futex: &Atomic<u32>) -> bool {
unsafe { hermit_abi::futex_wake(futex.as_ptr(), 1) > 0 }
}
#[inline]
pub fn futex_wake_all(futex: &AtomicU32) {
pub fn futex_wake_all(futex: &Atomic<u32>) {
unsafe {
hermit_abi::futex_wake(futex.as_ptr(), i32::MAX);
}

View file

@ -1,12 +1,12 @@
use super::abi;
use crate::cell::UnsafeCell;
use crate::mem::MaybeUninit;
use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize, Ordering};
/// A mutex implemented by `dis_dsp` (for intra-core synchronization) and a
/// spinlock (for inter-core synchronization).
pub struct SpinMutex<T = ()> {
locked: AtomicBool,
locked: Atomic<bool>,
data: UnsafeCell<T>,
}
@ -19,7 +19,7 @@ impl<T> SpinMutex<T> {
/// Acquire a lock.
#[inline]
pub fn with_locked<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
struct SpinMutexGuard<'a>(&'a AtomicBool);
struct SpinMutexGuard<'a>(&'a Atomic<bool>);
impl Drop for SpinMutexGuard<'_> {
#[inline]
@ -50,7 +50,7 @@ impl<T> SpinMutex<T> {
/// It's assumed that `0` is not a valid ID, and all kernel
/// object IDs fall into range `1..=usize::MAX`.
pub struct SpinIdOnceCell<T = ()> {
id: AtomicUsize,
id: Atomic<usize>,
spin: SpinMutex<()>,
extra: UnsafeCell<MaybeUninit<T>>,
}

View file

@ -9,7 +9,7 @@ use crate::ffi::CStr;
use crate::mem::ManuallyDrop;
use crate::num::NonZero;
use crate::ptr::NonNull;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
use crate::time::Duration;
use crate::{hint, io};
@ -64,7 +64,7 @@ struct ThreadInner {
/// '--> JOIN_FINALIZE ---'
/// (-1)
///
lifecycle: AtomicUsize,
lifecycle: Atomic<usize>,
}
// Safety: The only `!Sync` field, `ThreadInner::start`, is only touched by

View file

@ -1,7 +1,7 @@
#![cfg_attr(test, allow(unused))] // RT initialization logic is not compiled for test
use core::arch::global_asm;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::sync::atomic::{Atomic, AtomicUsize, Ordering};
use crate::io::Write;
@ -31,7 +31,7 @@ unsafe extern "C" fn tcs_init(secondary: bool) {
const BUSY: usize = 1;
const DONE: usize = 2;
// Three-state spin-lock
static RELOC_STATE: AtomicUsize = AtomicUsize::new(UNINIT);
static RELOC_STATE: Atomic<usize> = AtomicUsize::new(UNINIT);
if secondary && RELOC_STATE.load(Ordering::Relaxed) != DONE {
rtabort!("Entered secondary TCS before main TCS!")

View file

@ -3,7 +3,7 @@ mod sync_bitset;
use self::sync_bitset::*;
use crate::cell::Cell;
use crate::num::NonZero;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
use crate::{mem, ptr};
#[cfg(target_pointer_width = "64")]
@ -15,14 +15,10 @@ const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS;
#[cfg_attr(test, linkage = "available_externally")]
#[unsafe(export_name = "_ZN16__rust_internals3std3sys3pal3sgx3abi3tls14TLS_KEY_IN_USEE")]
static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT;
macro_rules! dup {
((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
(() $($val:tt)*) => ([$($val),*])
}
// Specifying linkage/symbol name is solely to ensure a single instance between this crate and its unit tests
#[cfg_attr(test, linkage = "available_externally")]
#[unsafe(export_name = "_ZN16__rust_internals3std3sys3pal3sgx3abi3tls14TLS_DESTRUCTORE")]
static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0)));
static TLS_DESTRUCTOR: [Atomic<usize>; TLS_KEYS] = [const { AtomicUsize::new(0) }; TLS_KEYS];
unsafe extern "C" {
fn get_tls_ptr() -> *const u8;
@ -84,7 +80,7 @@ impl<'a> Drop for ActiveTls<'a> {
impl Tls {
pub fn new() -> Tls {
Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) }
Tls { data: [const { Cell::new(ptr::null_mut()) }; TLS_KEYS] }
}
pub unsafe fn activate(&self) -> ActiveTls<'_> {

View file

@ -4,10 +4,10 @@ mod tests;
use super::{TLS_KEYS_BITSET_SIZE, USIZE_BITS};
use crate::iter::{Enumerate, Peekable};
use crate::slice::Iter;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
/// A bitset that can be used synchronously.
pub(super) struct SyncBitset([AtomicUsize; TLS_KEYS_BITSET_SIZE]);
pub(super) struct SyncBitset([Atomic<usize>; TLS_KEYS_BITSET_SIZE]);
pub(super) const SYNC_BITSET_INIT: SyncBitset =
SyncBitset([AtomicUsize::new(0), AtomicUsize::new(0)]);
@ -58,7 +58,7 @@ impl SyncBitset {
}
pub(super) struct SyncBitsetIter<'a> {
iter: Peekable<Enumerate<Iter<'a, AtomicUsize>>>,
iter: Peekable<Enumerate<Iter<'a, Atomic<usize>>>>,
elem_idx: usize,
}

View file

@ -6,7 +6,7 @@
#![allow(fuzzy_provenance_casts)] // FIXME: this entire module systematically confuses pointers and integers
use crate::io::ErrorKind;
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
pub mod abi;
mod libunwind_integration;
@ -46,7 +46,7 @@ pub fn unsupported_err() -> crate::io::Error {
/// what happens when `SGX_INEFFECTIVE_ERROR` is set to `true`. If it is
/// `false`, the behavior is the same as `unsupported`.
pub fn sgx_ineffective<T>(v: T) -> crate::io::Result<T> {
static SGX_INEFFECTIVE_ERROR: AtomicBool = AtomicBool::new(false);
static SGX_INEFFECTIVE_ERROR: Atomic<bool> = AtomicBool::new(false);
if SGX_INEFFECTIVE_ERROR.load(Ordering::Relaxed) {
Err(crate::io::const_error!(
ErrorKind::Uncategorized,

View file

@ -7,12 +7,12 @@ mod tests;
use crate::cell::UnsafeCell;
use crate::hint;
use crate::ops::{Deref, DerefMut};
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
#[derive(Default)]
pub struct SpinMutex<T> {
value: UnsafeCell<T>,
lock: AtomicBool,
lock: Atomic<bool>,
}
unsafe impl<T: Send> Send for SpinMutex<T> {}

View file

@ -22,7 +22,7 @@ use crate::os::uefi::{self};
use crate::path::Path;
use crate::ptr::NonNull;
use crate::slice;
use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sync::atomic::{Atomic, AtomicPtr, Ordering};
use crate::sys_common::wstr::WStrUnits;
type BootInstallMultipleProtocolInterfaces =
@ -157,7 +157,7 @@ pub(crate) fn device_path_to_text(path: NonNull<device_path::Protocol>) -> io::R
Ok(path)
}
static LAST_VALID_HANDLE: AtomicPtr<crate::ffi::c_void> =
static LAST_VALID_HANDLE: Atomic<*mut crate::ffi::c_void> =
AtomicPtr::new(crate::ptr::null_mut());
if let Some(handle) = NonNull::new(LAST_VALID_HANDLE.load(Ordering::Acquire)) {
@ -269,7 +269,7 @@ impl OwnedDevicePath {
.ok_or_else(|| const_error!(io::ErrorKind::InvalidFilename, "invalid Device Path"))
}
static LAST_VALID_HANDLE: AtomicPtr<crate::ffi::c_void> =
static LAST_VALID_HANDLE: Atomic<*mut crate::ffi::c_void> =
AtomicPtr::new(crate::ptr::null_mut());
if let Some(handle) = NonNull::new(LAST_VALID_HANDLE.load(Ordering::Acquire)) {
@ -606,7 +606,7 @@ pub(crate) fn os_string_to_raw(s: &OsStr) -> Option<Box<[r_efi::efi::Char16]>> {
}
pub(crate) fn open_shell() -> Option<NonNull<shell::Protocol>> {
static LAST_VALID_HANDLE: AtomicPtr<crate::ffi::c_void> =
static LAST_VALID_HANDLE: Atomic<*mut crate::ffi::c_void> =
AtomicPtr::new(crate::ptr::null_mut());
if let Some(handle) = NonNull::new(LAST_VALID_HANDLE.load(Ordering::Acquire)) {

View file

@ -28,9 +28,9 @@ pub type RawOsError = usize;
use crate::io as std_io;
use crate::os::uefi;
use crate::ptr::NonNull;
use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sync::atomic::{Atomic, AtomicPtr, Ordering};
static EXIT_BOOT_SERVICE_EVENT: AtomicPtr<crate::ffi::c_void> =
static EXIT_BOOT_SERVICE_EVENT: Atomic<*mut crate::ffi::c_void> =
AtomicPtr::new(crate::ptr::null_mut());
/// # SAFETY

View file

@ -121,7 +121,7 @@ pub(crate) mod instant_internal {
use super::*;
use crate::mem::MaybeUninit;
use crate::ptr::NonNull;
use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sync::atomic::{Atomic, AtomicPtr, Ordering};
use crate::sys_common::mul_div_u64;
const NS_PER_SEC: u64 = 1_000_000_000;
@ -142,7 +142,7 @@ pub(crate) mod instant_internal {
Some(mul_div_u64(ts, NS_PER_SEC, freq))
}
static LAST_VALID_HANDLE: AtomicPtr<crate::ffi::c_void> =
static LAST_VALID_HANDLE: Atomic<*mut crate::ffi::c_void> =
AtomicPtr::new(crate::ptr::null_mut());
if let Some(handle) = NonNull::new(LAST_VALID_HANDLE.load(Ordering::Acquire)) {

View file

@ -8,16 +8,16 @@
target_os = "fuchsia",
))]
use crate::sync::atomic::AtomicU32;
use crate::sync::atomic::Atomic;
use crate::time::Duration;
/// An atomic for use as a futex that is at least 32-bits but may be larger
pub type Futex = AtomicU32;
pub type Futex = Atomic<Primitive>;
/// Must be the underlying type of Futex
pub type Primitive = u32;
/// An atomic for use as a futex that is at least 8-bits but may be larger.
pub type SmallFutex = AtomicU32;
pub type SmallFutex = Atomic<SmallPrimitive>;
/// Must be the underlying type of SmallFutex
pub type SmallPrimitive = u32;
@ -27,7 +27,7 @@ pub type SmallPrimitive = u32;
///
/// Returns false on timeout, and true in all other cases.
#[cfg(any(target_os = "linux", target_os = "android", target_os = "freebsd"))]
pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
pub fn futex_wait(futex: &Atomic<u32>, expected: u32, timeout: Option<Duration>) -> bool {
use super::time::Timespec;
use crate::ptr::null;
use crate::sync::atomic::Ordering::Relaxed;
@ -60,7 +60,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
let umtx_timeout_ptr = umtx_timeout.as_ref().map_or(null(), |t| t as *const _);
let umtx_timeout_size = umtx_timeout.as_ref().map_or(0, |t| size_of_val(t));
libc::_umtx_op(
futex as *const AtomicU32 as *mut _,
futex as *const Atomic<u32> as *mut _,
libc::UMTX_OP_WAIT_UINT_PRIVATE,
expected as libc::c_ulong,
crate::ptr::without_provenance_mut(umtx_timeout_size),
@ -71,7 +71,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
// absolute time rather than a relative time.
libc::syscall(
libc::SYS_futex,
futex as *const AtomicU32,
futex as *const Atomic<u32>,
libc::FUTEX_WAIT_BITSET | libc::FUTEX_PRIVATE_FLAG,
expected,
timespec.as_ref().map_or(null(), |t| t as *const libc::timespec),
@ -99,16 +99,16 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
///
/// On some platforms, this always returns false.
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn futex_wake(futex: &AtomicU32) -> bool {
let ptr = futex as *const AtomicU32;
pub fn futex_wake(futex: &Atomic<u32>) -> bool {
let ptr = futex as *const Atomic<u32>;
let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG;
unsafe { libc::syscall(libc::SYS_futex, ptr, op, 1) > 0 }
}
/// Wakes up all threads that are waiting on `futex_wait` on this futex.
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn futex_wake_all(futex: &AtomicU32) {
let ptr = futex as *const AtomicU32;
pub fn futex_wake_all(futex: &Atomic<u32>) {
let ptr = futex as *const Atomic<u32>;
let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG;
unsafe {
libc::syscall(libc::SYS_futex, ptr, op, i32::MAX);
@ -117,11 +117,11 @@ pub fn futex_wake_all(futex: &AtomicU32) {
// FreeBSD doesn't tell us how many threads are woken up, so this always returns false.
#[cfg(target_os = "freebsd")]
pub fn futex_wake(futex: &AtomicU32) -> bool {
pub fn futex_wake(futex: &Atomic<u32>) -> bool {
use crate::ptr::null_mut;
unsafe {
libc::_umtx_op(
futex as *const AtomicU32 as *mut _,
futex as *const Atomic<u32> as *mut _,
libc::UMTX_OP_WAKE_PRIVATE,
1,
null_mut(),
@ -132,11 +132,11 @@ pub fn futex_wake(futex: &AtomicU32) -> bool {
}
#[cfg(target_os = "freebsd")]
pub fn futex_wake_all(futex: &AtomicU32) {
pub fn futex_wake_all(futex: &Atomic<u32>) {
use crate::ptr::null_mut;
unsafe {
libc::_umtx_op(
futex as *const AtomicU32 as *mut _,
futex as *const Atomic<u32> as *mut _,
libc::UMTX_OP_WAKE_PRIVATE,
i32::MAX as libc::c_ulong,
null_mut(),
@ -146,7 +146,7 @@ pub fn futex_wake_all(futex: &AtomicU32) {
}
#[cfg(target_os = "openbsd")]
pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
pub fn futex_wait(futex: &Atomic<u32>, expected: u32, timeout: Option<Duration>) -> bool {
use super::time::Timespec;
use crate::ptr::{null, null_mut};
@ -157,7 +157,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
let r = unsafe {
libc::futex(
futex as *const AtomicU32 as *mut u32,
futex as *const Atomic<u32> as *mut u32,
libc::FUTEX_WAIT,
expected as i32,
timespec.as_ref().map_or(null(), |t| t as *const libc::timespec),
@ -169,20 +169,25 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
}
#[cfg(target_os = "openbsd")]
pub fn futex_wake(futex: &AtomicU32) -> bool {
pub fn futex_wake(futex: &Atomic<u32>) -> bool {
use crate::ptr::{null, null_mut};
unsafe {
libc::futex(futex as *const AtomicU32 as *mut u32, libc::FUTEX_WAKE, 1, null(), null_mut())
> 0
libc::futex(
futex as *const Atomic<u32> as *mut u32,
libc::FUTEX_WAKE,
1,
null(),
null_mut(),
) > 0
}
}
#[cfg(target_os = "openbsd")]
pub fn futex_wake_all(futex: &AtomicU32) {
pub fn futex_wake_all(futex: &Atomic<u32>) {
use crate::ptr::{null, null_mut};
unsafe {
libc::futex(
futex as *const AtomicU32 as *mut u32,
futex as *const Atomic<u32> as *mut u32,
libc::FUTEX_WAKE,
i32::MAX,
null(),
@ -192,7 +197,7 @@ pub fn futex_wake_all(futex: &AtomicU32) {
}
#[cfg(target_os = "dragonfly")]
pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
pub fn futex_wait(futex: &Atomic<u32>, expected: u32, timeout: Option<Duration>) -> bool {
// A timeout of 0 means infinite.
// We round smaller timeouts up to 1 millisecond.
// Overflows are rounded up to an infinite timeout.
@ -200,7 +205,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
timeout.and_then(|d| Some(i32::try_from(d.as_millis()).ok()?.max(1))).unwrap_or(0);
let r = unsafe {
libc::umtx_sleep(futex as *const AtomicU32 as *const i32, expected as i32, timeout_ms)
libc::umtx_sleep(futex as *const Atomic<u32> as *const i32, expected as i32, timeout_ms)
};
r == 0 || super::os::errno() != libc::ETIMEDOUT
@ -208,28 +213,28 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
// DragonflyBSD doesn't tell us how many threads are woken up, so this always returns false.
#[cfg(target_os = "dragonfly")]
pub fn futex_wake(futex: &AtomicU32) -> bool {
unsafe { libc::umtx_wakeup(futex as *const AtomicU32 as *const i32, 1) };
pub fn futex_wake(futex: &Atomic<u32>) -> bool {
unsafe { libc::umtx_wakeup(futex as *const Atomic<u32> as *const i32, 1) };
false
}
#[cfg(target_os = "dragonfly")]
pub fn futex_wake_all(futex: &AtomicU32) {
unsafe { libc::umtx_wakeup(futex as *const AtomicU32 as *const i32, i32::MAX) };
pub fn futex_wake_all(futex: &Atomic<u32>) {
unsafe { libc::umtx_wakeup(futex as *const Atomic<u32> as *const i32, i32::MAX) };
}
#[cfg(target_os = "emscripten")]
unsafe extern "C" {
fn emscripten_futex_wake(addr: *const AtomicU32, count: libc::c_int) -> libc::c_int;
fn emscripten_futex_wake(addr: *const Atomic<u32>, count: libc::c_int) -> libc::c_int;
fn emscripten_futex_wait(
addr: *const AtomicU32,
addr: *const Atomic<u32>,
val: libc::c_uint,
max_wait_ms: libc::c_double,
) -> libc::c_int;
}
#[cfg(target_os = "emscripten")]
pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
pub fn futex_wait(futex: &Atomic<u32>, expected: u32, timeout: Option<Duration>) -> bool {
unsafe {
emscripten_futex_wait(
futex,
@ -240,18 +245,18 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
}
#[cfg(target_os = "emscripten")]
pub fn futex_wake(futex: &AtomicU32) -> bool {
pub fn futex_wake(futex: &Atomic<u32>) -> bool {
unsafe { emscripten_futex_wake(futex, 1) > 0 }
}
#[cfg(target_os = "emscripten")]
pub fn futex_wake_all(futex: &AtomicU32) {
pub fn futex_wake_all(futex: &Atomic<u32>) {
unsafe { emscripten_futex_wake(futex, i32::MAX) };
}
#[cfg(target_os = "fuchsia")]
pub mod zircon {
pub type zx_futex_t = crate::sync::atomic::AtomicU32;
pub type zx_futex_t = crate::sync::atomic::Atomic<u32>;
pub type zx_handle_t = u32;
pub type zx_status_t = i32;
pub type zx_time_t = i64;
@ -282,7 +287,7 @@ pub mod zircon {
}
#[cfg(target_os = "fuchsia")]
pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
pub fn futex_wait(futex: &Atomic<u32>, expected: u32, timeout: Option<Duration>) -> bool {
// Sleep forever if the timeout is longer than fits in a i64.
let deadline = timeout
.and_then(|d| {
@ -293,19 +298,23 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
.unwrap_or(zircon::ZX_TIME_INFINITE);
unsafe {
zircon::zx_futex_wait(futex, AtomicU32::new(expected), zircon::ZX_HANDLE_INVALID, deadline)
!= zircon::ZX_ERR_TIMED_OUT
zircon::zx_futex_wait(
futex,
core::sync::atomic::AtomicU32::new(expected),
zircon::ZX_HANDLE_INVALID,
deadline,
) != zircon::ZX_ERR_TIMED_OUT
}
}
// Fuchsia doesn't tell us how many threads are woken up, so this always returns false.
#[cfg(target_os = "fuchsia")]
pub fn futex_wake(futex: &AtomicU32) -> bool {
pub fn futex_wake(futex: &Atomic<u32>) -> bool {
unsafe { zircon::zx_futex_wake(futex, 1) };
false
}
#[cfg(target_os = "fuchsia")]
pub fn futex_wake_all(futex: &AtomicU32) {
pub fn futex_wake_all(futex: &Atomic<u32>) {
unsafe { zircon::zx_futex_wake(futex, u32::MAX) };
}

View file

@ -62,7 +62,7 @@ use crate::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use crate::os::unix::net::UnixStream;
use crate::process::{ChildStderr, ChildStdin, ChildStdout};
use crate::ptr;
use crate::sync::atomic::{AtomicBool, AtomicU8, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, AtomicU8, Ordering};
use crate::sys::cvt;
use crate::sys::fs::CachedFileMetadata;
use crate::sys::weak::syscall;
@ -596,7 +596,7 @@ pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) ->
// Kernel prior to 4.5 don't have copy_file_range
// We store the availability in a global to avoid unnecessary syscalls
static HAS_COPY_FILE_RANGE: AtomicU8 = AtomicU8::new(NOT_PROBED);
static HAS_COPY_FILE_RANGE: Atomic<u8> = AtomicU8::new(NOT_PROBED);
let mut have_probed = match HAS_COPY_FILE_RANGE.load(Ordering::Relaxed) {
NOT_PROBED => false,
@ -721,8 +721,8 @@ enum SpliceMode {
/// performs splice or sendfile between file descriptors
/// Does _not_ fall back to a generic copy loop.
fn sendfile_splice(mode: SpliceMode, reader: RawFd, writer: RawFd, len: u64) -> CopyResult {
static HAS_SENDFILE: AtomicBool = AtomicBool::new(true);
static HAS_SPLICE: AtomicBool = AtomicBool::new(true);
static HAS_SENDFILE: Atomic<bool> = AtomicBool::new(true);
static HAS_SPLICE: Atomic<bool> = AtomicBool::new(true);
// Android builds use feature level 14, but the libc wrapper for splice is
// gated on feature level 21+, so we have to invoke the syscall directly.

View file

@ -204,7 +204,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
target_os = "vxworks",
target_os = "vita",
)))]
static ON_BROKEN_PIPE_FLAG_USED: crate::sync::atomic::AtomicBool =
static ON_BROKEN_PIPE_FLAG_USED: crate::sync::atomic::Atomic<bool> =
crate::sync::atomic::AtomicBool::new(false);
#[cfg(not(any(

View file

@ -49,7 +49,7 @@ mod imp {
use crate::cell::Cell;
use crate::ops::Range;
use crate::sync::OnceLock;
use crate::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, AtomicUsize, Ordering};
use crate::sys::pal::unix::os;
use crate::{io, mem, ptr, thread};
@ -118,9 +118,9 @@ mod imp {
}
}
static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
static MAIN_ALTSTACK: AtomicPtr<libc::c_void> = AtomicPtr::new(ptr::null_mut());
static NEED_ALTSTACK: AtomicBool = AtomicBool::new(false);
static PAGE_SIZE: Atomic<usize> = AtomicUsize::new(0);
static MAIN_ALTSTACK: Atomic<*mut libc::c_void> = AtomicPtr::new(ptr::null_mut());
static NEED_ALTSTACK: Atomic<bool> = AtomicBool::new(false);
/// # Safety
/// Must be called only once

View file

@ -24,7 +24,7 @@
use crate::ffi::CStr;
use crate::marker::PhantomData;
use crate::sync::atomic::{self, AtomicPtr, Ordering};
use crate::sync::atomic::{self, Atomic, AtomicPtr, Ordering};
use crate::{mem, ptr};
// We can use true weak linkage on ELF targets.
@ -80,7 +80,7 @@ pub(crate) macro dlsym {
}
pub(crate) struct DlsymWeak<F> {
name: &'static str,
func: AtomicPtr<libc::c_void>,
func: Atomic<*mut libc::c_void>,
_marker: PhantomData<F>,
}

View file

@ -3,16 +3,16 @@ use core::arch::wasm32 as wasm;
#[cfg(target_arch = "wasm64")]
use core::arch::wasm64 as wasm;
use crate::sync::atomic::AtomicU32;
use crate::sync::atomic::Atomic;
use crate::time::Duration;
/// An atomic for use as a futex that is at least 32-bits but may be larger
pub type Futex = AtomicU32;
pub type Futex = Atomic<Primitive>;
/// Must be the underlying type of Futex
pub type Primitive = u32;
/// An atomic for use as a futex that is at least 8-bits but may be larger.
pub type SmallFutex = AtomicU32;
pub type SmallFutex = Atomic<SmallPrimitive>;
/// Must be the underlying type of SmallFutex
pub type SmallPrimitive = u32;
@ -21,11 +21,14 @@ pub type SmallPrimitive = u32;
/// Returns directly if the futex doesn't hold the expected value.
///
/// Returns false on timeout, and true in all other cases.
pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
pub fn futex_wait(futex: &Atomic<u32>, expected: u32, timeout: Option<Duration>) -> bool {
let timeout = timeout.and_then(|t| t.as_nanos().try_into().ok()).unwrap_or(-1);
unsafe {
wasm::memory_atomic_wait32(futex as *const AtomicU32 as *mut i32, expected as i32, timeout)
< 2
wasm::memory_atomic_wait32(
futex as *const Atomic<u32> as *mut i32,
expected as i32,
timeout,
) < 2
}
}
@ -33,13 +36,13 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
///
/// Returns true if this actually woke up such a thread,
/// or false if no thread was waiting on this futex.
pub fn futex_wake(futex: &AtomicU32) -> bool {
unsafe { wasm::memory_atomic_notify(futex as *const AtomicU32 as *mut i32, 1) > 0 }
pub fn futex_wake(futex: &Atomic<u32>) -> bool {
unsafe { wasm::memory_atomic_notify(futex as *const Atomic<u32> as *mut i32, 1) > 0 }
}
/// Wakes up all threads that are waiting on `futex_wait` on this futex.
pub fn futex_wake_all(futex: &AtomicU32) {
pub fn futex_wake_all(futex: &Atomic<u32>) {
unsafe {
wasm::memory_atomic_notify(futex as *const AtomicU32 as *mut i32, i32::MAX as u32);
wasm::memory_atomic_notify(futex as *const Atomic<u32> as *mut i32, i32::MAX as u32);
}
}

View file

@ -145,7 +145,7 @@ macro_rules! compat_fn_with_fallback {
use super::*;
use crate::mem;
use crate::ffi::CStr;
use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sync::atomic::{Atomic, AtomicPtr, Ordering};
use crate::sys::compat::Module;
type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
@ -155,7 +155,7 @@ macro_rules! compat_fn_with_fallback {
/// When that is called it attempts to load the requested symbol.
/// If it succeeds, `PTR` is set to the address of that symbol.
/// If it fails, then `PTR` is set to `fallback`.
static PTR: AtomicPtr<c_void> = AtomicPtr::new(load as *mut _);
static PTR: Atomic<*mut c_void> = AtomicPtr::new(load as *mut _);
unsafe extern "system" fn load($($argname: $argtype),*) -> $rettype {
unsafe {
@ -212,9 +212,9 @@ macro_rules! compat_fn_optional {
use crate::ffi::c_void;
use crate::mem;
use crate::ptr::{self, NonNull};
use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sync::atomic::{Atomic, AtomicPtr, Ordering};
pub(in crate::sys) static PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
pub(in crate::sys) static PTR: Atomic<*mut c_void> = AtomicPtr::new(ptr::null_mut());
type F = unsafe extern "system" fn($($argtype),*) $(-> $rettype)?;

View file

@ -1,8 +1,8 @@
use core::ffi::c_void;
use core::ptr;
use core::sync::atomic::{
AtomicBool, AtomicI8, AtomicI16, AtomicI32, AtomicI64, AtomicIsize, AtomicPtr, AtomicU8,
AtomicU16, AtomicU32, AtomicU64, AtomicUsize,
Atomic, AtomicBool, AtomicI8, AtomicI16, AtomicI32, AtomicI64, AtomicIsize, AtomicPtr,
AtomicU8, AtomicU16, AtomicU32, AtomicU64, AtomicUsize,
};
use core::time::Duration;
@ -10,12 +10,12 @@ use super::api::{self, WinError};
use crate::sys::{c, dur2timeout};
/// An atomic for use as a futex that is at least 32-bits but may be larger
pub type Futex = AtomicU32;
pub type Futex = Atomic<Primitive>;
/// Must be the underlying type of Futex
pub type Primitive = u32;
/// An atomic for use as a futex that is at least 8-bits but may be larger.
pub type SmallFutex = AtomicU8;
pub type SmallFutex = Atomic<SmallPrimitive>;
/// Must be the underlying type of SmallFutex
pub type SmallPrimitive = u8;
@ -47,10 +47,10 @@ unsafe_waitable_int! {
(usize, AtomicUsize),
}
unsafe impl<T> Waitable for *const T {
type Futex = AtomicPtr<T>;
type Futex = Atomic<*mut T>;
}
unsafe impl<T> Waitable for *mut T {
type Futex = AtomicPtr<T>;
type Futex = Atomic<*mut T>;
}
unsafe impl<T> Futexable for AtomicPtr<T> {}

View file

@ -3,8 +3,8 @@ use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
use crate::os::windows::prelude::*;
use crate::path::Path;
use crate::random::{DefaultRandomSource, Random};
use crate::sync::atomic::AtomicUsize;
use crate::sync::atomic::Ordering::Relaxed;
use crate::sync::atomic::{Atomic, AtomicUsize};
use crate::sys::c;
use crate::sys::fs::{File, OpenOptions};
use crate::sys::handle::Handle;
@ -192,7 +192,7 @@ pub fn spawn_pipe_relay(
}
fn random_number() -> usize {
static N: AtomicUsize = AtomicUsize::new(0);
static N: Atomic<usize> = AtomicUsize::new(0);
loop {
if N.load(Relaxed) != 0 {
return N.fetch_add(1, Relaxed);

View file

@ -164,7 +164,7 @@ fn intervals2dur(intervals: u64) -> Duration {
mod perf_counter {
use super::NANOS_PER_SEC;
use crate::sync::atomic::{AtomicU64, Ordering};
use crate::sync::atomic::{Atomic, AtomicU64, Ordering};
use crate::sys::{c, cvt};
use crate::sys_common::mul_div_u64;
use crate::time::Duration;
@ -199,7 +199,7 @@ mod perf_counter {
// uninitialized. Storing this as a single `AtomicU64` allows us to use
// `Relaxed` operations, as we are only interested in the effects on a
// single memory location.
static FREQUENCY: AtomicU64 = AtomicU64::new(0);
static FREQUENCY: Atomic<u64> = AtomicU64::new(0);
let cached = FREQUENCY.load(Ordering::Relaxed);
// If a previous thread has filled in this global state, use that.

View file

@ -4,12 +4,12 @@ use crate::ffi::{OsStr, OsString};
use crate::marker::PhantomData;
use crate::os::xous::ffi::Error as XousError;
use crate::path::{self, PathBuf};
use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sync::atomic::{Atomic, AtomicPtr, Ordering};
use crate::{fmt, io};
pub(crate) mod params;
static PARAMS_ADDRESS: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
static PARAMS_ADDRESS: Atomic<*mut u8> = AtomicPtr::new(core::ptr::null_mut());
#[cfg(not(test))]
#[cfg(feature = "panic_unwind")]

View file

@ -442,7 +442,7 @@ impl Command {
envp: Option<&CStringArray>,
) -> io::Result<Option<Process>> {
#[cfg(target_os = "linux")]
use core::sync::atomic::{AtomicU8, Ordering};
use core::sync::atomic::{Atomic, AtomicU8, Ordering};
use crate::mem::MaybeUninit;
use crate::sys::{self, cvt_nz, on_broken_pipe_flag_used};
@ -475,7 +475,7 @@ impl Command {
fn pidfd_getpid(pidfd: libc::c_int) -> libc::c_int;
);
static PIDFD_SUPPORTED: AtomicU8 = AtomicU8::new(0);
static PIDFD_SUPPORTED: Atomic<u8> = AtomicU8::new(0);
const UNKNOWN: u8 = 0;
const SPAWN: u8 = 1;
// Obtaining a pidfd via the fork+exec path might work

View file

@ -64,8 +64,8 @@ use crate::fs::File;
use crate::io::Read;
use crate::os::fd::AsRawFd;
use crate::sync::OnceLock;
use crate::sync::atomic::AtomicBool;
use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crate::sync::atomic::{Atomic, AtomicBool};
use crate::sys::pal::os::errno;
use crate::sys::pal::weak::syscall;
@ -81,9 +81,9 @@ fn getrandom(mut bytes: &mut [u8], insecure: bool) {
) -> libc::ssize_t;
);
static GETRANDOM_AVAILABLE: AtomicBool = AtomicBool::new(true);
static GRND_INSECURE_AVAILABLE: AtomicBool = AtomicBool::new(true);
static URANDOM_READY: AtomicBool = AtomicBool::new(false);
static GETRANDOM_AVAILABLE: Atomic<bool> = AtomicBool::new(true);
static GRND_INSECURE_AVAILABLE: Atomic<bool> = AtomicBool::new(true);
static URANDOM_READY: Atomic<bool> = AtomicBool::new(false);
static DEVICE: OnceLock<File> = OnceLock::new();
if GETRANDOM_AVAILABLE.load(Relaxed) {

View file

@ -1,7 +1,7 @@
use crate::sync::atomic::AtomicBool;
use crate::sync::atomic::Ordering::Relaxed;
use crate::sync::atomic::{Atomic, AtomicBool};
static RNG_INIT: AtomicBool = AtomicBool::new(false);
static RNG_INIT: Atomic<bool> = AtomicBool::new(false);
pub fn fill_bytes(mut bytes: &mut [u8]) {
while !RNG_INIT.load(Relaxed) {

View file

@ -2,15 +2,15 @@
use crate::pin::Pin;
use crate::ptr;
use crate::sync::atomic::AtomicUsize;
use crate::sync::atomic::Ordering::Relaxed;
use crate::sync::atomic::{Atomic, AtomicUsize};
use crate::sys::pal::sync as pal;
use crate::sys::sync::{Mutex, OnceBox};
use crate::time::{Duration, Instant};
pub struct Condvar {
cvar: OnceBox<pal::Condvar>,
mutex: AtomicUsize,
mutex: Atomic<usize>,
}
impl Condvar {

View file

@ -1,4 +1,4 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use core::sync::atomic::{Atomic, AtomicUsize, Ordering};
use crate::os::xous::ffi::{blocking_scalar, scalar};
use crate::os::xous::services::{TicktimerScalar, ticktimer_server};
@ -11,8 +11,8 @@ use crate::time::Duration;
const NOTIFY_TRIES: usize = 3;
pub struct Condvar {
counter: AtomicUsize,
timed_out: AtomicUsize,
counter: Atomic<usize>,
timed_out: Atomic<usize>,
}
unsafe impl Send for Condvar {}

View file

@ -37,8 +37,8 @@
//!
//! [mutex in Fuchsia's libsync]: https://cs.opensource.google/fuchsia/fuchsia/+/main:zircon/system/ulib/sync/mutex.c
use crate::sync::atomic::AtomicU32;
use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crate::sync::atomic::{Atomic, AtomicU32};
use crate::sys::futex::zircon::{
ZX_ERR_BAD_HANDLE, ZX_ERR_BAD_STATE, ZX_ERR_INVALID_ARGS, ZX_ERR_TIMED_OUT, ZX_ERR_WRONG_TYPE,
ZX_OK, ZX_TIME_INFINITE, zx_futex_wait, zx_futex_wake_single_owner, zx_handle_t,
@ -52,7 +52,7 @@ const CONTESTED_BIT: u32 = 1;
const UNLOCKED: u32 = 0;
pub struct Mutex {
futex: AtomicU32,
futex: Atomic<u32>,
}
#[inline]

View file

@ -1,7 +1,7 @@
use crate::os::xous::ffi::{blocking_scalar, do_yield};
use crate::os::xous::services::{TicktimerScalar, ticktimer_server};
use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crate::sync::atomic::{AtomicBool, AtomicUsize};
use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize};
pub struct Mutex {
/// The "locked" value indicates how many threads are waiting on this
@ -14,12 +14,12 @@ pub struct Mutex {
/// for a lock, or it is locked for long periods of time. Rather than
/// spinning, these locks send a Message to the ticktimer server
/// requesting that they be woken up when a lock is unlocked.
locked: AtomicUsize,
locked: Atomic<usize>,
/// Whether this Mutex ever was contended, and therefore made a trip
/// to the ticktimer server. If this was never set, then we were never
/// on the slow path and can skip deregistering the mutex.
contended: AtomicBool,
contended: Atomic<bool>,
}
impl Mutex {

View file

@ -57,7 +57,7 @@
use crate::cell::Cell;
use crate::sync::atomic::Ordering::{AcqRel, Acquire, Release};
use crate::sync::atomic::{AtomicBool, AtomicPtr};
use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr};
use crate::sync::poison::once::ExclusiveState;
use crate::thread::{self, Thread};
use crate::{fmt, ptr, sync as public};
@ -65,7 +65,7 @@ use crate::{fmt, ptr, sync as public};
type StateAndQueue = *mut ();
pub struct Once {
state_and_queue: AtomicPtr<()>,
state_and_queue: Atomic<*mut ()>,
}
pub struct OnceState {
@ -94,7 +94,7 @@ const QUEUE_MASK: usize = !STATE_MASK;
#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
struct Waiter {
thread: Thread,
signaled: AtomicBool,
signaled: Atomic<bool>,
next: Cell<*const Waiter>,
}
@ -102,7 +102,7 @@ struct Waiter {
// Every node is a struct on the stack of a waiting thread.
// Will wake up the waiters when it gets dropped, i.e. also on panic.
struct WaiterQueue<'a> {
state_and_queue: &'a AtomicPtr<()>,
state_and_queue: &'a Atomic<*mut ()>,
set_state_on_drop_to: StateAndQueue,
}
@ -232,7 +232,7 @@ impl Once {
}
fn wait(
state_and_queue: &AtomicPtr<()>,
state_and_queue: &Atomic<*mut ()>,
mut current: StateAndQueue,
return_on_poisoned: bool,
) -> StateAndQueue {

View file

@ -8,11 +8,11 @@
use crate::mem::replace;
use crate::pin::Pin;
use crate::ptr::null_mut;
use crate::sync::atomic::AtomicPtr;
use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crate::sync::atomic::{Atomic, AtomicPtr};
pub(crate) struct OnceBox<T> {
ptr: AtomicPtr<T>,
ptr: Atomic<*mut T>,
}
impl<T> OnceBox<T> {

View file

@ -117,11 +117,11 @@ use crate::hint::spin_loop;
use crate::mem;
use crate::ptr::{self, NonNull, null_mut, without_provenance_mut};
use crate::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
use crate::sync::atomic::{AtomicBool, AtomicPtr};
use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr};
use crate::thread::{self, Thread};
/// The atomic lock state.
type AtomicState = AtomicPtr<()>;
type AtomicState = Atomic<State>;
/// The inner lock state.
type State = *mut ();
@ -181,11 +181,11 @@ struct Node {
tail: AtomicLink,
write: bool,
thread: OnceCell<Thread>,
completed: AtomicBool,
completed: Atomic<bool>,
}
/// An atomic node pointer with relaxed operations.
struct AtomicLink(AtomicPtr<Node>);
struct AtomicLink(Atomic<*mut Node>);
impl AtomicLink {
fn new(v: Option<NonNull<Node>>) -> AtomicLink {

View file

@ -13,8 +13,8 @@
#![allow(non_camel_case_types)]
use crate::pin::Pin;
use crate::sync::atomic::AtomicI8;
use crate::sync::atomic::Ordering::{Acquire, Release};
use crate::sync::atomic::{Atomic, AtomicI8};
use crate::time::Duration;
type dispatch_semaphore_t = *mut crate::ffi::c_void;
@ -38,7 +38,7 @@ const PARKED: i8 = -1;
pub struct Parker {
semaphore: dispatch_semaphore_t,
state: AtomicI8,
state: Atomic<i8>,
}
unsafe impl Sync for Parker {}

View file

@ -10,12 +10,12 @@
use crate::cell::UnsafeCell;
use crate::pin::Pin;
use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crate::sync::atomic::{AtomicI8, fence};
use crate::sync::atomic::{Atomic, AtomicI8, fence};
use crate::sys::thread_parking::{ThreadId, current, park, park_timeout, unpark};
use crate::time::Duration;
pub struct Parker {
state: AtomicI8,
state: Atomic<i8>,
tid: UnsafeCell<Option<ThreadId>>,
}

View file

@ -1,8 +1,8 @@
//! Thread parking without `futex` using the `pthread` synchronization primitives.
use crate::pin::Pin;
use crate::sync::atomic::AtomicUsize;
use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crate::sync::atomic::{Atomic, AtomicUsize};
use crate::sys::pal::sync::{Condvar, Mutex};
use crate::time::Duration;
@ -11,7 +11,7 @@ const PARKED: usize = 1;
const NOTIFIED: usize = 2;
pub struct Parker {
state: AtomicUsize,
state: Atomic<usize>,
lock: Mutex,
cvar: Condvar,
}

View file

@ -60,13 +60,13 @@
use core::ffi::c_void;
use crate::pin::Pin;
use crate::sync::atomic::AtomicI8;
use crate::sync::atomic::Ordering::{Acquire, Release};
use crate::sync::atomic::{Atomic, AtomicI8};
use crate::sys::{c, dur2timeout};
use crate::time::Duration;
pub struct Parker {
state: AtomicI8,
state: Atomic<i8>,
}
const PARKED: i8 = -1;
@ -186,8 +186,8 @@ impl Parker {
mod keyed_events {
use core::pin::Pin;
use core::ptr;
use core::sync::atomic::AtomicPtr;
use core::sync::atomic::Ordering::{Acquire, Relaxed};
use core::sync::atomic::{Atomic, AtomicPtr};
use core::time::Duration;
use super::{EMPTY, NOTIFIED, Parker};
@ -244,7 +244,7 @@ mod keyed_events {
fn keyed_event_handle() -> c::HANDLE {
const INVALID: c::HANDLE = ptr::without_provenance_mut(!0);
static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(INVALID);
static HANDLE: Atomic<*mut crate::ffi::c_void> = AtomicPtr::new(INVALID);
match HANDLE.load(Relaxed) {
INVALID => {
let mut handle = c::INVALID_HANDLE_VALUE;

View file

@ -2,8 +2,8 @@ use crate::os::xous::ffi::{blocking_scalar, scalar};
use crate::os::xous::services::{TicktimerScalar, ticktimer_server};
use crate::pin::Pin;
use crate::ptr;
use crate::sync::atomic::AtomicI8;
use crate::sync::atomic::Ordering::{Acquire, Release};
use crate::sync::atomic::{Atomic, AtomicI8};
use crate::time::Duration;
const NOTIFIED: i8 = 1;
@ -11,7 +11,7 @@ const EMPTY: i8 = 0;
const PARKED: i8 = -1;
pub struct Parker {
state: AtomicI8,
state: Atomic<i8>,
}
impl Parker {

View file

@ -6,7 +6,7 @@
//! should be more lightweight and avoids circular dependencies with the rest of
//! `std`.
use crate::sync::atomic::{self, AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
/// A type for TLS keys that are statically allocated.
///
@ -14,7 +14,7 @@ use crate::sync::atomic::{self, AtomicUsize, Ordering};
/// dependencies with the rest of `std`.
pub struct LazyKey {
/// Inner static TLS key (internals).
key: AtomicUsize,
key: Atomic<usize>,
/// Destructor for the TLS value.
dtor: Option<unsafe extern "C" fn(*mut u8)>,
}
@ -31,7 +31,7 @@ const KEY_SENTVAL: usize = libc::PTHREAD_KEYS_MAX + 1;
impl LazyKey {
pub const fn new(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> LazyKey {
LazyKey { key: atomic::AtomicUsize::new(KEY_SENTVAL), dtor }
LazyKey { key: AtomicUsize::new(KEY_SENTVAL), dtor }
}
#[inline]

View file

@ -27,7 +27,7 @@
use crate::cell::UnsafeCell;
use crate::ptr;
use crate::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
use crate::sync::atomic::{AtomicPtr, AtomicU32};
use crate::sync::atomic::{Atomic, AtomicPtr, AtomicU32};
use crate::sys::c;
use crate::sys::thread_local::guard;
@ -38,9 +38,9 @@ pub struct LazyKey {
/// The key value shifted up by one. Since TLS_OUT_OF_INDEXES == u32::MAX
/// is not a valid key value, this allows us to use zero as sentinel value
/// without risking overflow.
key: AtomicU32,
key: Atomic<Key>,
dtor: Option<Dtor>,
next: AtomicPtr<LazyKey>,
next: Atomic<*mut LazyKey>,
/// Currently, destructors cannot be unregistered, so we cannot use racy
/// initialization for keys. Instead, we need synchronize initialization.
/// Use the Windows-provided `Once` since it does not require TLS.
@ -142,7 +142,7 @@ pub unsafe fn get(key: Key) -> *mut u8 {
unsafe { c::TlsGetValue(key).cast() }
}
static DTORS: AtomicPtr<LazyKey> = AtomicPtr::new(ptr::null_mut());
static DTORS: Atomic<*mut LazyKey> = AtomicPtr::new(ptr::null_mut());
/// Should only be called once per key, otherwise loops or breaks may occur in
/// the linked list.

View file

@ -42,7 +42,7 @@ use crate::mem::ManuallyDrop;
use crate::os::xous::ffi::{MemoryFlags, map_memory, unmap_memory};
use crate::ptr;
use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crate::sync::atomic::{AtomicPtr, AtomicUsize};
use crate::sync::atomic::{Atomic, AtomicPtr, AtomicUsize};
pub type Key = usize;
pub type Dtor = unsafe extern "C" fn(*mut u8);
@ -52,19 +52,19 @@ const TLS_MEMORY_SIZE: usize = 4096;
/// TLS keys start at `1`. Index `0` is unused
#[cfg(not(test))]
#[unsafe(export_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key13TLS_KEY_INDEXE")]
static TLS_KEY_INDEX: AtomicUsize = AtomicUsize::new(1);
static TLS_KEY_INDEX: Atomic<usize> = AtomicUsize::new(1);
#[cfg(not(test))]
#[unsafe(export_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key9DTORSE")]
static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());
static DTORS: Atomic<*mut Node> = AtomicPtr::new(ptr::null_mut());
#[cfg(test)]
unsafe extern "Rust" {
#[link_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key13TLS_KEY_INDEXE"]
static TLS_KEY_INDEX: AtomicUsize;
static TLS_KEY_INDEX: Atomic<usize>;
#[link_name = "_ZN16__rust_internals3std3sys4xous16thread_local_key9DTORSE"]
static DTORS: AtomicPtr<Node>;
static DTORS: Atomic<*mut Node>;
}
fn tls_ptr_addr() -> *mut *mut u8 {

View file

@ -166,7 +166,7 @@ use crate::mem::{self, ManuallyDrop, forget};
use crate::num::NonZero;
use crate::pin::Pin;
use crate::sync::Arc;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
use crate::sys::sync::Parker;
use crate::sys::thread as imp;
use crate::sys_common::{AsInner, IntoInner};
@ -481,7 +481,7 @@ impl Builder {
let Builder { name, stack_size, no_hooks } = self;
let stack_size = stack_size.unwrap_or_else(|| {
static MIN: AtomicUsize = AtomicUsize::new(0);
static MIN: Atomic<usize> = AtomicUsize::new(0);
match MIN.load(Ordering::Relaxed) {
0 => {}
@ -1195,9 +1195,9 @@ impl ThreadId {
cfg_if::cfg_if! {
if #[cfg(target_has_atomic = "64")] {
use crate::sync::atomic::AtomicU64;
use crate::sync::atomic::{Atomic, AtomicU64};
static COUNTER: AtomicU64 = AtomicU64::new(0);
static COUNTER: Atomic<u64> = AtomicU64::new(0);
let mut last = COUNTER.load(Ordering::Relaxed);
loop {
@ -1302,10 +1302,10 @@ pub(crate) mod main_thread {
cfg_if::cfg_if! {
if #[cfg(target_has_atomic = "64")] {
use super::ThreadId;
use crate::sync::atomic::AtomicU64;
use crate::sync::atomic::{Atomic, AtomicU64};
use crate::sync::atomic::Ordering::Relaxed;
static MAIN: AtomicU64 = AtomicU64::new(0);
static MAIN: Atomic<u64> = AtomicU64::new(0);
pub(super) fn get() -> Option<ThreadId> {
ThreadId::from_u64(MAIN.load(Relaxed))
@ -1319,10 +1319,10 @@ pub(crate) mod main_thread {
} else {
use super::ThreadId;
use crate::mem::MaybeUninit;
use crate::sync::atomic::AtomicBool;
use crate::sync::atomic::{Atomic, AtomicBool};
use crate::sync::atomic::Ordering::{Acquire, Release};
static INIT: AtomicBool = AtomicBool::new(false);
static INIT: Atomic<bool> = AtomicBool::new(false);
static mut MAIN: MaybeUninit<ThreadId> = MaybeUninit::uninit();
pub(super) fn get() -> Option<ThreadId> {

View file

@ -2,7 +2,7 @@ use super::{Builder, JoinInner, Result, Thread, current_or_unnamed};
use crate::marker::PhantomData;
use crate::panic::{AssertUnwindSafe, catch_unwind, resume_unwind};
use crate::sync::Arc;
use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize, Ordering};
use crate::{fmt, io};
/// A scope to spawn scoped threads in.
@ -35,8 +35,8 @@ pub struct Scope<'scope, 'env: 'scope> {
pub struct ScopedJoinHandle<'scope, T>(JoinInner<'scope, T>);
pub(super) struct ScopeData {
num_running_threads: AtomicUsize,
a_thread_panicked: AtomicBool,
num_running_threads: Atomic<usize>,
a_thread_panicked: Atomic<bool>,
main_thread: Thread,
}