Auto merge of #104101 - betrusted-io:xous-libstd-initial, r=bjorn3

Add initial libstd support for Xous

This patchset adds some minimal support to the tier-3 target `riscv32imac-unknown-xous-elf`. The following features are supported:

* alloc
* thread creation and joining
* thread sleeping
* thread_local
* panic_abort
* mutex
* condvar
* stdout

Additionally, internal support for the various Xous primitives surrounding IPC has been added as part of the Xous FFI. These may be exposed as part of `std::os::xous::ffi` in the future, however for now they are not public.

This represents the minimum viable product. A future patchset will add support for networking and filesystem support.
This commit is contained in:
bors 2023-09-19 07:38:20 +00:00
commit ae9c330629
32 changed files with 2484 additions and 10 deletions

View file

@ -44,6 +44,9 @@ cfg_if::cfg_if! {
} else if #[cfg(target_family = "wasm")] {
mod wasm;
pub use self::wasm::*;
} else if #[cfg(target_os = "xous")] {
mod xous;
pub use self::xous::*;
} else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] {
mod sgx;
pub use self::sgx::*;

View file

@ -0,0 +1,62 @@
use crate::alloc::{GlobalAlloc, Layout, System};
/// The process-wide dlmalloc instance. Every access must go through
/// `lock::lock()`, which serializes allocator calls across threads.
static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new();

#[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
    #[inline]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: the guard grants unique, non-reentrant access to DLMALLOC,
        // and the caller upholds `GlobalAlloc::alloc`'s preconditions.
        let _guard = lock::lock();
        unsafe { DLMALLOC.malloc(layout.size(), layout.align()) }
    }

    #[inline]
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        // SAFETY: as in `alloc`; `calloc` additionally zeroes the block.
        let _guard = lock::lock();
        unsafe { DLMALLOC.calloc(layout.size(), layout.align()) }
    }

    #[inline]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // SAFETY: as in `alloc`; the caller guarantees `ptr` was produced by
        // this allocator with the same `layout`.
        let _guard = lock::lock();
        unsafe { DLMALLOC.free(ptr, layout.size(), layout.align()) }
    }

    #[inline]
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // SAFETY: as in `dealloc`, with the requested new size forwarded on.
        let _guard = lock::lock();
        unsafe { DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) }
    }
}
mod lock {
    use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};

    /// 0 = unlocked, 1 = held.
    static LOCKED: AtomicI32 = AtomicI32::new(0);

    /// Guard returned by `lock()`; releases the allocator lock when dropped.
    pub struct DropLock;

    /// Acquire the global allocator lock, yielding to other threads between
    /// attempts rather than busy-spinning.
    pub fn lock() -> DropLock {
        while LOCKED.swap(1, SeqCst) != 0 {
            crate::os::xous::ffi::do_yield();
        }
        DropLock
    }

    impl Drop for DropLock {
        fn drop(&mut self) {
            // Release the lock; the previous value must have been "held".
            let previous = LOCKED.swap(0, SeqCst);
            debug_assert_eq!(previous, 1);
        }
    }
}

View file

@ -0,0 +1,111 @@
use super::mutex::Mutex;
use crate::os::xous::ffi::{blocking_scalar, scalar};
use crate::os::xous::services::ticktimer_server;
use crate::sync::Mutex as StdMutex;
use crate::time::Duration;
// The implementation is inspired by Andrew D. Birrell's paper
// "Implementing Condition Variables with Semaphores"
pub struct Condvar {
    /// Number of threads that have announced themselves in `wait`/`wait_timeout`
    /// and have not yet been claimed by a `notify_*` call.
    counter: StdMutex<usize>,
}

// SAFETY: the counter is protected by a standard mutex, and the remaining
// state lives on the ticktimer server, so cross-thread sharing is sound.
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
impl Condvar {
    #[inline]
    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
    pub const fn new() -> Condvar {
        Condvar { counter: StdMutex::new(0) }
    }

    /// Wake at most one thread blocked in `wait`/`wait_timeout`.
    pub fn notify_one(&self) {
        let mut counter = self.counter.lock().unwrap();
        // `counter` is a `usize`, so "no waiters" is exactly 0 — the previous
        // `<= 0` comparison was redundant for an unsigned type.
        if *counter == 0 {
            return;
        }
        *counter -= 1;
        let result = blocking_scalar(
            ticktimer_server(),
            crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), 1).into(),
        );
        drop(counter);
        result.expect("failure to send NotifyCondition command");
    }

    /// Wake every thread currently blocked on this condition variable.
    pub fn notify_all(&self) {
        let mut counter = self.counter.lock().unwrap();
        if *counter == 0 {
            return;
        }
        let result = blocking_scalar(
            ticktimer_server(),
            crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), *counter)
                .into(),
        );
        *counter = 0;
        drop(counter);
        result.expect("failure to send NotifyCondition command");
    }

    /// This condvar's address doubles as its unique id on the ticktimer server.
    fn index(&self) -> usize {
        self as *const Condvar as usize
    }

    /// Block until notified. `mutex` is released while waiting and
    /// re-acquired before returning.
    pub unsafe fn wait(&self, mutex: &Mutex) {
        let mut counter = self.counter.lock().unwrap();
        *counter += 1;
        unsafe { mutex.unlock() };
        drop(counter);

        let result = blocking_scalar(
            ticktimer_server(),
            crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), 0).into(),
        );
        unsafe { mutex.lock() };

        result.expect("Ticktimer: failure to send WaitForCondition command");
    }

    /// Like `wait`, but gives up after `dur`. Returns `false` on timeout.
    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
        let mut counter = self.counter.lock().unwrap();
        *counter += 1;
        unsafe { mutex.unlock() };
        drop(counter);

        // The server takes a millisecond timeout where 0 means "wait forever",
        // so round sub-millisecond durations up to 1 ms.
        let mut millis = dur.as_millis() as usize;
        if millis == 0 {
            millis = 1;
        }

        let result = blocking_scalar(
            ticktimer_server(),
            crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), millis)
                .into(),
        );
        unsafe { mutex.lock() };

        let result = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0;

        // If we awoke due to a timeout, decrement the wake count, as that would not have
        // been done in the `notify()` call.
        if !result {
            *self.counter.lock().unwrap() -= 1;
        }
        result
    }
}
impl Drop for Condvar {
    fn drop(&mut self) {
        // Best-effort: ask the ticktimer server to release any state tied to
        // this condvar. Failures are ignored — a destructor cannot report them.
        let msg = crate::os::xous::services::TicktimerScalar::FreeCondition(self.index());
        scalar(ticktimer_server(), msg.into()).ok();
    }
}

View file

@ -0,0 +1,7 @@
// Xous lock primitives: mutex and condvar coordinate blocking through the
// ticktimer server; rwlock is a pure atomic spin implementation.
mod condvar;
mod mutex;
mod rwlock;

pub use condvar::*;
pub use mutex::*;
pub use rwlock::*;

View file

@ -0,0 +1,116 @@
use crate::os::xous::ffi::{blocking_scalar, do_yield, scalar};
use crate::os::xous::services::ticktimer_server;
use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst};
/// Platform mutex. The fast path is a single atomic word; the contended
/// (slow) path parks the thread on the ticktimer server.
pub struct Mutex {
    /// The "locked" value indicates how many threads are waiting on this
    /// Mutex. Possible values are:
    /// 0: The lock is unlocked
    /// 1: The lock is locked and uncontended
    /// >=2: The lock is locked and contended
    ///
    /// A lock is "contended" when there is more than one thread waiting
    /// for a lock, or it is locked for long periods of time. Rather than
    /// spinning, these locks send a Message to the ticktimer server
    /// requesting that they be woken up when a lock is unlocked.
    locked: AtomicUsize,

    /// Whether this Mutex ever was contended, and therefore made a trip
    /// to the ticktimer server. If this was never set, then we were never
    /// on the slow path and can skip deregistering the mutex.
    contended: AtomicBool,
}
impl Mutex {
    #[inline]
    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
    pub const fn new() -> Mutex {
        Mutex { locked: AtomicUsize::new(0), contended: AtomicBool::new(false) }
    }

    /// The mutex's address serves as its unique id on the ticktimer server.
    fn index(&self) -> usize {
        self as *const Mutex as usize
    }

    #[inline]
    pub unsafe fn lock(&self) {
        // Try multiple times to acquire the lock without resorting to the ticktimer
        // server. For locks that are held for a short amount of time, this will
        // result in the ticktimer server never getting invoked. The `locked` value
        // will be either 0 or 1.
        for _attempts in 0..3 {
            if unsafe { self.try_lock() } {
                return;
            }
            do_yield();
        }

        // Try one more time to lock. If the lock is released between the previous code and
        // here, then the inner `locked` value will be 1 at the end of this. If it was not
        // locked, then the value will be more than 1, for example if there are multiple other
        // threads waiting on this lock.
        if unsafe { self.try_lock_or_poison() } {
            return;
        }

        // When this mutex is dropped, we will need to deregister it with the server.
        self.contended.store(true, Relaxed);

        // The lock is now "contended". When the lock is released, a Message will get sent to the
        // ticktimer server to wake it up. Note that this may already have happened, so the actual
        // value of `lock` may be anything (0, 1, 2, ...).
        blocking_scalar(
            ticktimer_server(),
            crate::os::xous::services::TicktimerScalar::LockMutex(self.index()).into(),
        )
        .expect("failure to send LockMutex command");
    }

    #[inline]
    pub unsafe fn unlock(&self) {
        let prev = self.locked.fetch_sub(1, SeqCst);

        // If the previous value was 1, then this was a "fast path" unlock, so no
        // need to involve the Ticktimer server
        if prev == 1 {
            return;
        }

        // If it was 0, then something has gone seriously wrong and the counter
        // has just wrapped around.
        if prev == 0 {
            panic!("mutex lock count underflowed");
        }

        // Unblock one thread that is waiting on this message.
        scalar(
            ticktimer_server(),
            crate::os::xous::services::TicktimerScalar::UnlockMutex(self.index()).into(),
        )
        .expect("failure to send UnlockMutex command");
    }

    /// Fast-path acquire: succeeds only if the lock was free (0 -> 1).
    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
    }

    /// Unconditionally increment `locked`, acquiring the lock only if the
    /// previous value was 0. On failure the count stays raised, marking the
    /// lock contended so the slow path in `lock` is taken.
    #[inline]
    pub unsafe fn try_lock_or_poison(&self) -> bool {
        self.locked.fetch_add(1, SeqCst) == 0
    }
}
impl Drop for Mutex {
    fn drop(&mut self) {
        // Only mutexes that ever hit the slow path were registered with the
        // ticktimer server, so only those need a FreeMutex message. Errors
        // are ignored: a destructor has nowhere to report them.
        if !self.contended.load(Relaxed) {
            return;
        }
        let msg = crate::os::xous::services::TicktimerScalar::FreeMutex(self.index());
        scalar(ticktimer_server(), msg.into()).ok();
    }
}

View file

@ -0,0 +1,72 @@
use crate::os::xous::ffi::do_yield;
use crate::sync::atomic::{AtomicIsize, Ordering::SeqCst};
pub struct RwLock {
    /// The "mode" value indicates the state of this RwLock (the original
    /// comment mistakenly said "Mutex"). Possible values are:
    /// -1: The lock is locked for writing
    /// 0: The lock is unlocked
    /// >=1: The lock is locked for reading, counting the readers
    ///
    /// This currently spins waiting for the lock to be freed. An
    /// optimization would be to involve the ticktimer server to
    /// coordinate unlocks.
    mode: AtomicIsize,
}

// SAFETY: all state lives in a single atomic word, so the type can be
// shared and sent between threads.
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}
impl RwLock {
    #[inline]
    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
    pub const fn new() -> RwLock {
        RwLock { mode: AtomicIsize::new(0) }
    }

    /// Acquire a shared (read) lock, yielding between attempts.
    #[inline]
    pub unsafe fn read(&self) {
        loop {
            if unsafe { self.try_read() } {
                break;
            }
            do_yield();
        }
    }

    /// Attempt to acquire a shared lock without blocking.
    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        // Snapshot the mode; a negative value means a writer holds the lock.
        let observed = self.mode.load(SeqCst);
        if observed < 0 {
            false
        } else {
            // Publish one more reader. If `mode` changed since the snapshot,
            // the exchange fails and the caller may simply retry.
            self.mode.compare_exchange(observed, observed + 1, SeqCst, SeqCst).is_ok()
        }
    }

    /// Acquire the exclusive (write) lock, yielding between attempts.
    #[inline]
    pub unsafe fn write(&self) {
        loop {
            if unsafe { self.try_write() } {
                break;
            }
            do_yield();
        }
    }

    /// Attempt the exclusive lock: only succeeds from the idle (0) state.
    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        self.mode.compare_exchange(0, -1, SeqCst, SeqCst).is_ok()
    }

    /// Release one shared lock by decrementing the reader count.
    #[inline]
    pub unsafe fn read_unlock(&self) {
        self.mode.fetch_sub(1, SeqCst);
    }

    /// Release the exclusive lock; panics if it was not actually held.
    #[inline]
    pub unsafe fn write_unlock(&self) {
        let released = self.mode.compare_exchange(-1, 0, SeqCst, SeqCst);
        assert_eq!(released, Ok(-1));
    }
}

View file

@ -0,0 +1,37 @@
#![deny(unsafe_op_in_unsafe_fn)]

// Modules without a `#[path]` attribute (alloc, locks, os, stdio, thread,
// thread_local_key, time) are implemented natively in this directory; the
// rest are borrowed from the `unsupported` or `unix` platform layers.
pub mod alloc;
#[path = "../unsupported/args.rs"]
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
#[path = "../unsupported/env.rs"]
pub mod env;
#[path = "../unsupported/fs.rs"]
pub mod fs;
#[path = "../unsupported/io.rs"]
pub mod io;
pub mod locks;
#[path = "../unsupported/net.rs"]
pub mod net;
#[path = "../unsupported/once.rs"]
pub mod once;
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod os_str;
#[path = "../unix/path.rs"]
pub mod path;
#[path = "../unsupported/pipe.rs"]
pub mod pipe;
#[path = "../unsupported/process.rs"]
pub mod process;
pub mod stdio;
pub mod thread;
pub mod thread_local_key;
#[path = "../unsupported/thread_parking.rs"]
pub mod thread_parking;
pub mod time;

#[path = "../unsupported/common.rs"]
mod common;
pub use common::*;

View file

@ -0,0 +1,147 @@
use super::unsupported;
use crate::error::Error as StdError;
use crate::ffi::{OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::marker::PhantomData;
use crate::os::xous::ffi::Error as XousError;
use crate::path::{self, PathBuf};
#[cfg(not(test))]
mod c_compat {
    use crate::os::xous::ffi::exit;

    // Declaration of the program's `main` symbol; it is defined elsewhere.
    extern "C" {
        fn main() -> u32;
    }

    /// C-compatible `abort`: terminate the process with exit code 1.
    #[no_mangle]
    pub extern "C" fn abort() {
        exit(1);
    }

    /// Process entry point: run `main` and exit with its return value.
    #[no_mangle]
    pub extern "C" fn _start() {
        exit(unsafe { main() });
    }

    // This function is needed by the panic runtime. The symbol is named in
    // pre-link args for the target specification, so keep that in sync.
    #[no_mangle]
    // NB. used by both libunwind and libpanic_abort
    pub extern "C" fn __rust_abort() -> ! {
        exit(101);
    }
}
/// Xous does not keep a per-thread `errno`; always report 0 ("no error").
pub fn errno() -> i32 {
    0
}

/// Render a raw Xous error code as text by converting it through
/// `XousError`'s `Display` implementation.
pub fn error_string(errno: i32) -> String {
    Into::<XousError>::into(errno).to_string()
}
/// There is no filesystem support yet, so there is no current directory.
pub fn getcwd() -> io::Result<PathBuf> {
    unsupported()
}

pub fn chdir(_: &path::Path) -> io::Result<()> {
    unsupported()
}
/// The never type (`!`) in the first field makes this type uninhabited: it
/// can be named but never constructed, since `PATH` splitting is unsupported.
pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);

pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
    panic!("unsupported")
}

impl<'a> Iterator for SplitPaths<'a> {
    type Item = PathBuf;
    fn next(&mut self) -> Option<PathBuf> {
        // `self.0: !` coerces to any type; this body can never actually run.
        self.0
    }
}
#[derive(Debug)]
pub struct JoinPathsError;

/// Joining `PATH`-style lists is not implemented on this platform, so this
/// always fails regardless of the input.
pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
where
    I: Iterator<Item = T>,
    T: AsRef<OsStr>,
{
    Err(JoinPathsError)
}

impl fmt::Display for JoinPathsError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to `str`'s Display so formatter flags are honored.
        "not supported on this platform yet".fmt(f)
    }
}

impl StdError for JoinPathsError {
    #[allow(deprecated)]
    fn description(&self) -> &str {
        "not supported on this platform yet"
    }
}
/// Paths to executables are not available on this platform.
pub fn current_exe() -> io::Result<PathBuf> {
    unsupported()
}
/// The process environment. The never type (`!`) makes this uninhabited:
/// Xous has no environment variables, so no `Env` value can ever exist.
pub struct Env(!);

impl Env {
    // FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
    pub fn str_debug(&self) -> impl fmt::Debug + '_ {
        let Self(inner) = self;
        // `inner: !` is uninhabited, so the empty match is exhaustive.
        match *inner {}
    }
}

impl fmt::Debug for Env {
    fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self(inner) = self;
        match *inner {}
    }
}

impl Iterator for Env {
    type Item = (OsString, OsString);
    fn next(&mut self) -> Option<(OsString, OsString)> {
        self.0
    }
}

/// Enumerating the environment is not possible; this panics rather than
/// returning an (unconstructible) `Env`.
pub fn env() -> Env {
    panic!("not supported on this platform")
}
/// There are no environment variables; every lookup misses.
pub fn getenv(_: &OsStr) -> Option<OsString> {
    None
}

pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> {
    Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot set env vars on this platform"))
}

pub fn unsetenv(_: &OsStr) -> io::Result<()> {
    Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot unset env vars on this platform"))
}
/// No filesystem means no temporary directory; this is a hard error.
pub fn temp_dir() -> PathBuf {
    panic!("no filesystem on this platform")
}

/// No filesystem means no home directory either, but callers can handle
/// `None` gracefully, so don't panic here.
pub fn home_dir() -> Option<PathBuf> {
    None
}
/// Terminate the process with the given exit code.
pub fn exit(code: i32) -> ! {
    crate::os::xous::ffi::exit(code as u32);
}

/// Process ids are not exposed on this platform.
pub fn getpid() -> u32 {
    panic!("no pids on this platform")
}

View file

@ -0,0 +1,131 @@
use crate::io;
/// Console input handle. Xous has no console input; reads return EOF.
pub struct Stdin;
/// Console output handle, backed by the log server.
pub struct Stdout {}
/// Console error handle, also backed by the log server.
pub struct Stderr;

use crate::os::xous::ffi::{lend, try_lend, try_scalar, Connection};
use crate::os::xous::services::{log_server, try_connect, LogScalar};
impl Stdin {
    pub const fn new() -> Stdin {
        Stdin
    }
}

impl io::Read for Stdin {
    /// There is no console input; always report end-of-file (0 bytes read).
    fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
        Ok(0)
    }
}
impl Stdout {
    pub const fn new() -> Stdout {
        Stdout {}
    }
}

impl io::Write for Stdout {
    /// Send `buf` to the log server in page-sized chunks via a page-aligned
    /// scratch buffer that is lent to the server.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        #[repr(align(4096))]
        struct LendBuffer([u8; 4096]);
        let mut scratch = LendBuffer([0u8; 4096]);
        let connection = log_server();
        for chunk in buf.chunks(scratch.0.len()) {
            // Stage the chunk at the start of the aligned buffer.
            scratch.0[..chunk.len()].copy_from_slice(chunk);
            lend(connection, 1, &scratch.0, 0, chunk.len()).unwrap();
        }
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
impl Stderr {
    pub const fn new() -> Stderr {
        Stderr
    }
}

impl io::Write for Stderr {
    /// Identical transport to `Stdout`: page-sized chunks lent to the log
    /// server through a page-aligned scratch buffer.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        #[repr(align(4096))]
        struct LendBuffer([u8; 4096]);
        let mut scratch = LendBuffer([0u8; 4096]);
        let connection = log_server();
        for chunk in buf.chunks(scratch.0.len()) {
            // Stage the chunk at the start of the aligned buffer.
            scratch.0[..chunk.len()].copy_from_slice(chunk);
            lend(connection, 1, &scratch.0, 0, chunk.len()).unwrap();
        }
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
/// Stdin never yields data, so no read buffer is needed.
pub const STDIN_BUF_SIZE: usize = 0;

/// Every error is treated as "bad file descriptor" for this check.
pub fn is_ebadf(_err: &io::Error) -> bool {
    true
}
/// Writer used while panicking: forwards the message to the log server and,
/// when a connection exists, to the graphics server for on-screen display.
#[derive(Copy, Clone)]
pub struct PanicWriter {
    log: Connection,
    gfx: Option<Connection>,
}

impl io::Write for PanicWriter {
    fn write(&mut self, s: &[u8]) -> core::result::Result<usize, io::Error> {
        for c in s.chunks(core::mem::size_of::<usize>() * 4) {
            // Text is grouped into 4x `usize` words. The id is 1100 plus
            // the number of characters in this message.
            // Ignore errors since we're already panicking.
            try_scalar(self.log, LogScalar::AppendPanicMessage(&c).into()).ok();
        }

        // Serialize the text to the graphics panic handler, only if we were able
        // to acquire a connection to it. Text length is encoded in the `valid` field,
        // the data itself in the buffer. Typically several messages are required to
        // fully transmit the entire panic message.
        if let Some(gfx) = self.gfx {
            #[repr(C, align(4096))]
            struct Request([u8; 4096]);
            let mut request = Request([0u8; 4096]);
            for (&s, d) in s.iter().zip(request.0.iter_mut()) {
                *d = s;
            }
            try_lend(gfx, 0 /* AppendPanicText */, &request.0, 0, s.len()).ok();
        }
        Ok(s.len())
    }

    // Tests show that this does not seem to be reliably called at the end of a panic
    // print, so, we can't rely on this to e.g. trigger a graphics update.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
/// Build the writer used to report a panic, announcing the start of the
/// panic to the log server along the way.
pub fn panic_output() -> Option<impl io::Write> {
    // Generally this won't fail because every server has already connected, so
    // this is likely to succeed.
    let log = log_server();

    // Send the "We're panicking" message (1000).
    try_scalar(log, LogScalar::BeginPanic.into()).ok();

    // This will fail if the connection table is full, or if the graphics
    // server is not running. Most servers do not already have this connection.
    let gfx = try_connect("panic-to-screen!");

    Some(PanicWriter { log, gfx })
}

View file

@ -0,0 +1,144 @@
use crate::ffi::CStr;
use crate::io;
use crate::num::NonZeroUsize;
use crate::os::xous::ffi::{
blocking_scalar, create_thread, do_yield, join_thread, map_memory, update_memory_flags,
MemoryFlags, Syscall, ThreadId,
};
use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
use crate::time::Duration;
use core::arch::asm;
/// Handle to a spawned thread; `join` waits on the kernel thread id.
pub struct Thread {
    tid: ThreadId,
}

/// Default stack size (128 KiB) when the caller does not specify one.
pub const DEFAULT_MIN_STACK_SIZE: usize = 131072;
/// Smallest stack we will allocate: one 4 KiB page.
const MIN_STACK_SIZE: usize = 4096;
/// Size of the inaccessible page placed on each side of the stack.
pub const GUARD_PAGE_SIZE: usize = 4096;
impl Thread {
    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
    /// Spawn a new thread running closure `p` on a freshly mapped stack of at
    /// least `stack` bytes, bracketed by two guard pages.
    pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
        // Double-box the closure; the raw pointer is reconstituted inside
        // `thread_start` on the new thread.
        let p = Box::into_raw(Box::new(p));
        // Round the requested size up to a whole number of 4 KiB pages.
        let mut stack_size = crate::cmp::max(stack, MIN_STACK_SIZE);
        if (stack_size & 4095) != 0 {
            stack_size = (stack_size + 4095) & !4095;
        }

        // Allocate the whole thing, then divide it up after the fact. This ensures that
        // even if there's a context switch during this function, the whole stack plus
        // guard pages will remain contiguous.
        let stack_plus_guard_pages: &mut [u8] = unsafe {
            map_memory(
                None,
                None,
                GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE,
                MemoryFlags::R | MemoryFlags::W | MemoryFlags::X,
            )
        }
        .map_err(|code| io::Error::from_raw_os_error(code as i32))?;

        // No access to this page. Note: Write-only pages are illegal, and will
        // cause an access violation.
        unsafe {
            update_memory_flags(&mut stack_plus_guard_pages[0..GUARD_PAGE_SIZE], MemoryFlags::W)
                .map_err(|code| io::Error::from_raw_os_error(code as i32))?
        };

        // No access to this page. Note: Write-only pages are illegal, and will
        // cause an access violation.
        unsafe {
            update_memory_flags(
                &mut stack_plus_guard_pages[(GUARD_PAGE_SIZE + stack_size)..],
                MemoryFlags::W,
            )
            .map_err(|code| io::Error::from_raw_os_error(code as i32))?
        };

        // Base of the whole allocation, handed to the new thread so it can
        // unmap everything (stack + both guards) when it exits.
        let guard_page_pre = stack_plus_guard_pages.as_ptr() as usize;
        let tid = create_thread(
            thread_start as *mut usize,
            &mut stack_plus_guard_pages[GUARD_PAGE_SIZE..(stack_size + GUARD_PAGE_SIZE)],
            p as usize,
            guard_page_pre,
            stack_size,
            0,
        )
        .map_err(|code| io::Error::from_raw_os_error(code as i32))?;

        // Entry point that runs on the newly created thread.
        extern "C" fn thread_start(main: *mut usize, guard_page_pre: usize, stack_size: usize) {
            unsafe {
                // Finally, let's run some code.
                Box::from_raw(main as *mut Box<dyn FnOnce()>)();
            }

            // Destroy TLS, which will free the TLS page and call the destructor for
            // any thread local storage.
            unsafe {
                crate::sys::thread_local_key::destroy_tls();
            }

            // Deallocate the stack memory, along with the guard pages. Afterwards,
            // exit the thread by returning to the magic address 0xff80_3000usize,
            // which tells the kernel to deallocate this thread.
            let mapped_memory_base = guard_page_pre;
            let mapped_memory_length = GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE;
            unsafe {
                asm!(
                    "ecall",
                    "ret",
                    in("a0") Syscall::UnmapMemory as usize,
                    in("a1") mapped_memory_base,
                    in("a2") mapped_memory_length,
                    in("ra") 0xff80_3000usize,
                    options(nomem, nostack, noreturn)
                );
            }
        }

        Ok(Thread { tid })
    }

    /// Give up the rest of this thread's time slice.
    pub fn yield_now() {
        do_yield();
    }

    /// Thread names are not supported on Xous.
    pub fn set_name(_name: &CStr) {
        // nope
    }

    /// Sleep for at least `dur` by issuing `SleepMs` requests.
    pub fn sleep(dur: Duration) {
        // Because the sleep server works on units of `usized milliseconds`, split
        // the messages up into these chunks. This means we may run into issues
        // if you try to sleep a thread for more than 49 days on a 32-bit system.
        let mut millis = dur.as_millis();
        while millis > 0 {
            let sleep_duration =
                if millis > (usize::MAX as _) { usize::MAX } else { millis as usize };
            blocking_scalar(ticktimer_server(), TicktimerScalar::SleepMs(sleep_duration).into())
                .expect("failed to send message to ticktimer server");
            millis -= sleep_duration as u128;
        }
    }

    /// Block until the thread identified by `self.tid` has exited.
    pub fn join(self) {
        join_thread(self.tid).unwrap();
    }
}
/// Number of hardware threads available. Xous currently runs unicore, so
/// this is always 1.
pub fn available_parallelism() -> io::Result<NonZeroUsize> {
    // `NonZeroUsize::MIN` is 1; this avoids the previous needless
    // `unsafe { new_unchecked(1) }`.
    Ok(NonZeroUsize::MIN)
}
/// Stack-guard bookkeeping for std. Xous installs its own guard pages in
/// `Thread::new`, so no std-level guard information is tracked.
pub mod guard {
    // Uninhabited: no guard value can exist, so `Option<Guard>` is always `None`.
    pub type Guard = !;
    pub unsafe fn current() -> Option<Guard> {
        None
    }
    pub unsafe fn init() -> Option<Guard> {
        None
    }
}

View file

@ -0,0 +1,190 @@
use crate::mem::ManuallyDrop;
use crate::ptr;
use crate::sync::atomic::AtomicPtr;
use crate::sync::atomic::AtomicUsize;
use crate::sync::atomic::Ordering::SeqCst;
use core::arch::asm;
use crate::os::xous::ffi::{map_memory, unmap_memory, MemoryFlags};
/// Thread Local Storage
///
/// Currently, we are limited to 1023 TLS entries. The entries
/// live in a page of memory that's unique per-process, and is
/// stored in the `$tp` register. If this register is 0, then
/// TLS has not been initialized and thread cleanup can be skipped.
///
/// The index into this register is the `key`. This key is identical
/// between all threads, but indexes a different offset within this
/// pointer.
pub type Key = usize;

/// Signature of a TLS destructor: receives the value that was stored under
/// the key when the owning thread exits.
pub type Dtor = unsafe extern "C" fn(*mut u8);

/// One page of memory backs all TLS slots for a thread.
const TLS_MEMORY_SIZE: usize = 4096;

/// TLS keys start at `1` to mimic POSIX.
static TLS_KEY_INDEX: AtomicUsize = AtomicUsize::new(1);
/// Read the raw TLS base pointer from the RISC-V `tp` (thread pointer)
/// register. A null result means TLS is uninitialized for this thread.
fn tls_ptr_addr() -> *mut usize {
    let mut tp: usize;
    unsafe {
        asm!(
            "mv {}, tp",
            out(reg) tp,
        );
    }
    core::ptr::from_exposed_addr_mut::<usize>(tp)
}
/// Create an area of memory that's unique per thread. This area will
/// contain all thread local pointers.
fn tls_ptr() -> *mut usize {
    let mut tp = tls_ptr_addr();

    // If the TP register is `0`, then this thread hasn't initialized
    // its TLS yet. Allocate a new page to store this memory.
    if tp.is_null() {
        tp = unsafe {
            map_memory(
                None,
                None,
                // NOTE(review): the length passed is TLS_MEMORY_SIZE divided by
                // the word size — confirm `map_memory` counts `usize` elements,
                // not bytes.
                TLS_MEMORY_SIZE / core::mem::size_of::<usize>(),
                MemoryFlags::R | MemoryFlags::W,
            )
        }
        .expect("Unable to allocate memory for thread local storage")
        .as_mut_ptr();

        unsafe {
            // Key #0 is currently unused.
            (tp).write_volatile(0);

            // Set the thread's `$tp` register
            asm!(
                "mv tp, {}",
                in(reg) tp as usize,
            );
        }
    }
    tp
}
/// Allocate a new TLS key. These keys are shared among all threads.
// Keys increase monotonically and are never reused (`destroy` panics).
fn tls_alloc() -> usize {
    TLS_KEY_INDEX.fetch_add(1, SeqCst)
}
/// Allocate a fresh TLS key, recording `dtor` (if provided) so that it runs
/// when the owning thread exits.
#[inline]
pub unsafe fn create(dtor: Option<Dtor>) -> Key {
    let key = tls_alloc();
    match dtor {
        Some(destructor) => unsafe { register_dtor(key, destructor) },
        None => {}
    }
    key
}
/// Store `value` in this thread's slot for `key`.
#[inline]
pub unsafe fn set(key: Key, value: *mut u8) {
    // Keys are 1-based. NOTE(review): the upper bound of 1022 leaves the
    // final slot(s) of the page unused — presumably reserved; confirm.
    assert!((key < 1022) && (key >= 1));
    unsafe { tls_ptr().add(key).write_volatile(value as usize) };
}

/// Fetch this thread's value for `key` (null if never set).
#[inline]
pub unsafe fn get(key: Key) -> *mut u8 {
    assert!((key < 1022) && (key >= 1));
    core::ptr::from_exposed_addr_mut::<u8>(unsafe { tls_ptr().add(key).read_volatile() })
}

/// Key deallocation is not implemented; see the dtor-registration notes below.
#[inline]
pub unsafe fn destroy(_key: Key) {
    panic!("can't destroy keys on Xous");
}
// -------------------------------------------------------------------------
// Dtor registration (stolen from Windows)
//
// Xous has no native support for running destructors so we manage our own
// list of destructors to keep track of how to destroy keys. We then install a
// callback later to get invoked whenever a thread exits, running all
// appropriate destructors.
//
// Currently unregistration from this list is not supported. A destructor can be
// registered but cannot be unregistered. There's various simplifying reasons
// for doing this, the big ones being:
//
// 1. Currently we don't even support deallocating TLS keys, so normal operation
// doesn't need to deallocate a destructor.
// 2. There is no point in time where we know we can unregister a destructor
// because it could always be getting run by some remote thread.
//
// Typically processes have a statically known set of TLS keys which is pretty
// small, and we'd want to keep this memory alive for the whole process anyway
// really.
//
// Perhaps one day we can fold the `Box` here into a static allocation,
// expanding the `StaticKey` structure to contain not only a slot for the TLS
// key but also a slot for the destructor queue on windows. An optimization for
// another day!
/// Head of the process-global, lock-free singly-linked list of registered
/// TLS destructors (see the commentary above).
static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

/// One registered destructor: the key it applies to plus the next node.
struct Node {
    dtor: Dtor,
    key: Key,
    next: *mut Node,
}
/// Prepend a destructor record to the global `DTORS` list.
unsafe fn register_dtor(key: Key, dtor: Dtor) {
    // The node is intentionally leaked (`ManuallyDrop`): entries are never
    // unregistered, so the allocation must live for the whole process.
    let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));

    // Lock-free prepend: retry the CAS until our node becomes the head.
    let mut head = DTORS.load(SeqCst);
    loop {
        node.next = head;
        match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) {
            Ok(_) => return, // nothing to drop, we successfully added the node to the list
            Err(cur) => head = cur,
        }
    }
}
/// Tear down this thread's TLS: run registered destructors, then free the
/// per-thread TLS page. Called from the thread-exit path.
pub unsafe fn destroy_tls() {
    let tp = tls_ptr_addr();

    // If the pointer address is 0, then this thread has no TLS.
    if tp.is_null() {
        return;
    }
    unsafe { run_dtors() };

    // Finally, free the TLS array
    unsafe {
        unmap_memory(core::slice::from_raw_parts_mut(
            tp,
            TLS_MEMORY_SIZE / core::mem::size_of::<usize>(),
        ))
        .unwrap()
    };
}
/// Run registered TLS destructors for the current thread. A destructor may
/// itself set TLS values, so the list is swept repeatedly until a pass makes
/// no progress, bounded at 5 passes.
unsafe fn run_dtors() {
    let mut any_run = true;
    for _ in 0..5 {
        if !any_run {
            break;
        }
        any_run = false;
        let mut cur = DTORS.load(SeqCst);
        while !cur.is_null() {
            // Look up this thread's value for the node's key; null means
            // there is nothing to destroy.
            let ptr = unsafe { get((*cur).key) };
            if !ptr.is_null() {
                // Clear the slot before invoking the destructor so the same
                // value is not destroyed twice.
                unsafe { set((*cur).key, ptr::null_mut()) };
                unsafe { ((*cur).dtor)(ptr as *mut _) };
                any_run = true;
            }
            unsafe { cur = (*cur).next };
        }
    }
}

View file

@ -0,0 +1,57 @@
use crate::os::xous::ffi::blocking_scalar;
use crate::os::xous::services::{
systime_server, ticktimer_server, SystimeScalar::GetUtcTimeMs, TicktimerScalar::ElapsedMs,
};
use crate::time::Duration;
/// Monotonic timestamp, stored as the ticktimer server's elapsed-time reading.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
pub struct Instant(Duration);

/// Wall-clock timestamp, stored as an offset from `UNIX_EPOCH`.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
pub struct SystemTime(Duration);

pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));
impl Instant {
pub fn now() -> Instant {
let result = blocking_scalar(ticktimer_server(), ElapsedMs.into())
.expect("failed to request elapsed_ms");
let lower = result[0];
let upper = result[1];
Instant { 0: Duration::from_millis(lower as u64 | (upper as u64) << 32) }
}
pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
self.0.checked_sub(other.0)
}
pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
self.0.checked_add(*other).map(Instant)
}
pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
self.0.checked_sub(*other).map(Instant)
}
}
impl SystemTime {
    /// Current UTC time in milliseconds from the system time server
    /// (delivered as two 32-bit halves).
    pub fn now() -> SystemTime {
        let result = blocking_scalar(systime_server(), GetUtcTimeMs.into())
            .expect("failed to request utc time in ms");
        let lower = result[0];
        let upper = result[1];
        // Idiomatic tuple-struct construction instead of the `{ 0: ... }` form.
        SystemTime(Duration::from_millis((upper as u64) << 32 | lower as u64))
    }

    /// Difference between two timestamps; `Err` carries the magnitude when
    /// `other` is later than `self`.
    pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
        self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
    }

    pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
        Some(SystemTime(self.0.checked_add(*other)?))
    }

    pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
        Some(SystemTime(self.0.checked_sub(*other)?))
    }
}