This commit is an implementation of [RFC 503][rfc] which is a stabilization story for the prelude. Most of the RFC was directly applied, removing reexports. Some reexports are kept around, however: * `range` remains until range syntax has landed to reduce churn. * `Path` and `GenericPath` remain until path reform lands. This is done to prevent many imports of `GenericPath` which will soon be removed. * All `io` traits remain until I/O reform lands so imports can be rewritten all at once to `std::io::prelude::*`. This is a breaking change because many prelude reexports have been removed, and the RFC can be consulted for the exact list of removed reexports, as well as to find the locations of where to import them. [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/0503-prelude-stabilization.md [breaking-change] Closes #20068
80 lines
2.3 KiB
Rust
80 lines
2.3 KiB
Rust
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
|
|
// file at the top-level directory of this distribution and at
|
|
// http://rust-lang.org/COPYRIGHT.
|
|
//
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
// option. This file may not be copied, modified, or distributed
|
|
// except according to those terms.
|
|
|
|
use prelude::v1::*;
|
|
|
|
use sync::atomic;
|
|
use alloc::{mod, heap};
|
|
|
|
use libc::DWORD;
|
|
use sys::sync as ffi;
|
|
|
|
/// Spin count handed to `InitializeCriticalSectionAndSpinCount`: on a
/// multiprocessor system a contended `EnterCriticalSection` spins this
/// many times before falling back to a kernel wait.
const SPIN_COUNT: DWORD = 4000;

/// A native Windows mutex backed by a lazily allocated `CRITICAL_SECTION`.
///
/// `inner` stores the address of the heap-allocated critical section as
/// an integer, or 0 if the section has not been created yet (the state
/// `MUTEX_INIT` starts in).
pub struct Mutex { inner: atomic::AtomicUint }

/// Static initializer for `Mutex`; the 0 stored here means "allocate
/// and initialize the critical section on first use" (see `Mutex::get`).
pub const MUTEX_INIT: Mutex = Mutex { inner: atomic::INIT_ATOMIC_UINT };

// Safe to share across threads: the stored pointer is only read/written
// through SeqCst atomic operations, and the pointed-to CRITICAL_SECTION
// is itself designed for cross-thread use via the Win32 API.
unsafe impl Sync for Mutex {}
|
|
|
|
#[inline]
|
|
pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION {
|
|
m.get()
|
|
}
|
|
|
|
impl Mutex {
    /// Creates a new mutex with an eagerly allocated and initialized
    /// critical section (unlike `MUTEX_INIT`, which defers allocation).
    #[inline]
    pub unsafe fn new() -> Mutex {
        Mutex { inner: atomic::AtomicUint::new(init_lock() as uint) }
    }
    /// Acquires the mutex, blocking until the critical section is entered.
    #[inline]
    pub unsafe fn lock(&self) {
        ffi::EnterCriticalSection(self.get())
    }
    /// Attempts to acquire the mutex without blocking; returns `true` on
    /// success (`TryEnterCriticalSection` returns nonzero when entered).
    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        ffi::TryEnterCriticalSection(self.get()) != 0
    }
    /// Releases the mutex. Caller must currently own the lock.
    #[inline]
    pub unsafe fn unlock(&self) {
        ffi::LeaveCriticalSection(self.get())
    }
    /// Frees the underlying critical section, if one was ever allocated.
    /// Swapping 0 in atomically claims the pointer so it is freed at
    /// most once; a never-locked `MUTEX_INIT` holds 0 and frees nothing.
    pub unsafe fn destroy(&self) {
        let lock = self.inner.swap(0, atomic::SeqCst);
        if lock != 0 { free_lock(lock as ffi::LPCRITICAL_SECTION) }
    }

    /// Returns the critical-section pointer, lazily initializing it on
    /// first use. This supports the static `MUTEX_INIT` initializer,
    /// which cannot pre-allocate.
    ///
    /// Racy lazy init: multiple threads may each build a candidate
    /// section concurrently; the `compare_and_swap` winner publishes its
    /// pointer, and each loser frees its own candidate and reloads the
    /// published one, so exactly one section survives.
    unsafe fn get(&self) -> ffi::LPCRITICAL_SECTION {
        // Fast path: already initialized.
        match self.inner.load(atomic::SeqCst) {
            0 => {}
            n => return n as ffi::LPCRITICAL_SECTION
        }
        let lock = init_lock();
        // Publish our candidate only if no other thread beat us to it.
        match self.inner.compare_and_swap(0, lock as uint, atomic::SeqCst) {
            0 => return lock as ffi::LPCRITICAL_SECTION,
            _ => {}
        }
        // Lost the race: discard our candidate and use the winner's.
        free_lock(lock);
        return self.inner.load(atomic::SeqCst) as ffi::LPCRITICAL_SECTION;
    }
}
|
|
|
|
unsafe fn init_lock() -> ffi::LPCRITICAL_SECTION {
|
|
let block = heap::allocate(ffi::CRITICAL_SECTION_SIZE, 8)
|
|
as ffi::LPCRITICAL_SECTION;
|
|
if block.is_null() { alloc::oom() }
|
|
ffi::InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
|
|
return block;
|
|
}
|
|
|
|
/// Tears down a critical section created by `init_lock`: releases its
/// OS resources first, then returns its memory to the heap with the
/// same size/alignment it was allocated with.
unsafe fn free_lock(h: ffi::LPCRITICAL_SECTION) {
    ffi::DeleteCriticalSection(h);
    let block = h as *mut _;
    heap::deallocate(block, ffi::CRITICAL_SECTION_SIZE, 8);
}
|