Merge branch 'master' into redox

Jeremy Soller 2016-11-03 08:52:48 -06:00
commit 74dc845c2d
299 changed files with 2974 additions and 1278 deletions


@@ -1,80 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of running at_exit routines
//!
//! Documentation can be found on the `rt::at_exit` function.
use alloc::boxed::FnBox;
use ptr;
use sys_common::mutex::Mutex;
type Queue = Vec<Box<FnBox()>>;
// NB these are specifically not types from `std::sync` as they currently rely
// on poisoning and this module needs to operate at a lower level than requiring
// the thread infrastructure to be in place (useful on the borders of
// initialization/destruction).
static LOCK: Mutex = Mutex::new();
static mut QUEUE: *mut Queue = ptr::null_mut();
// The maximum number of times the cleanup routines will be run. While running
// the at_exit closures new ones may be registered, and this count is the number
// of times the new closures will be allowed to register successfully. After
// this number of iterations all new registrations will return `false`.
const ITERS: usize = 10;
unsafe fn init() -> bool {
if QUEUE.is_null() {
let state: Box<Queue> = box Vec::new();
QUEUE = Box::into_raw(state);
} else if QUEUE as usize == 1 {
// can't re-init after a cleanup
return false
}
true
}
pub fn cleanup() {
for i in 0..ITERS {
unsafe {
LOCK.lock();
let queue = QUEUE;
QUEUE = if i == ITERS - 1 {1} else {0} as *mut _;
LOCK.unlock();
// make sure we're not recursively cleaning up
assert!(queue as usize != 1);
// If we never called init, there is no need to clean up!
if queue as usize != 0 {
let queue: Box<Queue> = Box::from_raw(queue);
for to_run in *queue {
to_run();
}
}
}
}
}
pub fn push(f: Box<FnBox()>) -> bool {
let mut ret = true;
unsafe {
LOCK.lock();
if init() {
(*QUEUE).push(f);
} else {
ret = false;
}
LOCK.unlock();
}
ret
}
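// A standalone sketch of the same register-then-drain pattern written against the
// safe `std::sync` types that this module deliberately avoids (it has to run below
// the poisoning/thread infrastructure). Illustrative only; the names are made up
// and it assumes a recent Rust where `Mutex::new` is const. It omits the ITERS
// limit and the "can't re-init after cleanup" sentinel handled above.
use std::sync::Mutex;

static REGISTRY: Mutex<Option<Vec<Box<dyn FnOnce() + Send>>>> = Mutex::new(None);

fn register(f: Box<dyn FnOnce() + Send>) -> bool {
    let mut queue = REGISTRY.lock().unwrap();
    // Lazily create the queue, mirroring `init` above.
    queue.get_or_insert_with(Vec::new).push(f);
    true
}

fn drain() {
    // Take the queue while holding the lock, then run the closures without it,
    // so the closures themselves may call `register` again (compare `cleanup`).
    let queue = REGISTRY.lock().unwrap().take();
    if let Some(queue) = queue {
        for f in queue {
            f();
        }
    }
}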


@@ -1,257 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![cfg_attr(target_os = "nacl", allow(dead_code))]
use env;
use io::prelude::*;
use io;
use libc;
use str;
use sync::atomic::{self, Ordering};
pub use sys::backtrace::write;
#[cfg(target_pointer_width = "64")]
pub const HEX_WIDTH: usize = 18;
#[cfg(target_pointer_width = "32")]
pub const HEX_WIDTH: usize = 10;
// For now logging is turned off by default, and this function checks whether
// the magical RUST_BACKTRACE environment variable is set to turn it on.
pub fn log_enabled() -> bool {
static ENABLED: atomic::AtomicIsize = atomic::AtomicIsize::new(0);
match ENABLED.load(Ordering::SeqCst) {
1 => return false,
2 => return true,
_ => {}
}
let val = match env::var_os("RUST_BACKTRACE") {
Some(x) => if &x == "0" { 1 } else { 2 },
None => 1,
};
ENABLED.store(val, Ordering::SeqCst);
val == 2
}
// These output functions should now be used everywhere to ensure consistency.
pub fn output(w: &mut Write, idx: isize, addr: *mut libc::c_void,
s: Option<&[u8]>) -> io::Result<()> {
write!(w, " {:2}: {:2$?} - ", idx, addr, HEX_WIDTH)?;
match s.and_then(|s| str::from_utf8(s).ok()) {
Some(string) => demangle(w, string)?,
None => write!(w, "<unknown>")?,
}
w.write_all(&['\n' as u8])
}
#[allow(dead_code)]
pub fn output_fileline(w: &mut Write, file: &[u8], line: libc::c_int,
more: bool) -> io::Result<()> {
let file = str::from_utf8(file).unwrap_or("<unknown>");
// prior line: " ##: {:2$} - func"
write!(w, " {:3$}at {}:{}", "", file, line, HEX_WIDTH)?;
if more {
write!(w, " <... and possibly more>")?;
}
w.write_all(&['\n' as u8])
}
// All Rust symbols are in theory lists of "::"-separated identifiers. Some
// assemblers, however, can't handle these characters in symbol names. To get
// around this, we use C++-style mangling. The mangling method is:
//
// 1. Prefix the symbol with "_ZN"
// 2. For each element of the path, emit the length plus the element
// 3. End the path with "E"
//
// For example, "_ZN4testE" => "test" and "_ZN3foo3barE" => "foo::bar".
//
// We're the ones printing our backtraces, so we can't rely on anything else to
// demangle our symbols. It's *much* nicer to look at demangled symbols, so
// this function is implemented to give us nice pretty output.
//
// Note that this demangler isn't quite as fancy as it could be. We have lots
// of other information in our symbols like hashes, version, type information,
// etc. Additionally, this doesn't handle glue symbols at all.
pub fn demangle(writer: &mut Write, s: &str) -> io::Result<()> {
// First validate the symbol. If it doesn't look like anything we're
// expecting, we just print it literally. Note that we must handle non-rust
// symbols because we could have any function in the backtrace.
let mut valid = true;
let mut inner = s;
if s.len() > 4 && s.starts_with("_ZN") && s.ends_with("E") {
inner = &s[3 .. s.len() - 1];
// On Windows, dbghelp strips leading underscores, so we accept "ZN...E" form too.
} else if s.len() > 3 && s.starts_with("ZN") && s.ends_with("E") {
inner = &s[2 .. s.len() - 1];
} else {
valid = false;
}
if valid {
let mut chars = inner.chars();
while valid {
let mut i = 0;
for c in chars.by_ref() {
if c.is_numeric() {
i = i * 10 + c as usize - '0' as usize;
} else {
break
}
}
if i == 0 {
valid = chars.next().is_none();
break
} else if chars.by_ref().take(i - 1).count() != i - 1 {
valid = false;
}
}
}
// Alright, let's do this.
if !valid {
writer.write_all(s.as_bytes())?;
} else {
let mut first = true;
while !inner.is_empty() {
if !first {
writer.write_all(b"::")?;
} else {
first = false;
}
let mut rest = inner;
while rest.chars().next().unwrap().is_numeric() {
rest = &rest[1..];
}
let i: usize = inner[.. (inner.len() - rest.len())].parse().unwrap();
inner = &rest[i..];
rest = &rest[..i];
if rest.starts_with("_$") {
rest = &rest[1..];
}
while !rest.is_empty() {
if rest.starts_with(".") {
if let Some('.') = rest[1..].chars().next() {
writer.write_all(b"::")?;
rest = &rest[2..];
} else {
writer.write_all(b".")?;
rest = &rest[1..];
}
} else if rest.starts_with("$") {
macro_rules! demangle {
($($pat:expr => $demangled:expr),*) => ({
$(if rest.starts_with($pat) {
writer.write_all($demangled)?;
rest = &rest[$pat.len()..];
} else)*
{
writer.write_all(rest.as_bytes())?;
break;
}
})
}
// see src/librustc/back/link.rs for these mappings
demangle! (
"$SP$" => b"@",
"$BP$" => b"*",
"$RF$" => b"&",
"$LT$" => b"<",
"$GT$" => b">",
"$LP$" => b"(",
"$RP$" => b")",
"$C$" => b",",
// in theory we can demangle any Unicode code point, but
// for simplicity we just catch the common ones.
"$u7e$" => b"~",
"$u20$" => b" ",
"$u27$" => b"'",
"$u5b$" => b"[",
"$u5d$" => b"]",
"$u7b$" => b"{",
"$u7d$" => b"}",
"$u3b$" => b";",
"$u2b$" => b"+",
"$u22$" => b"\""
)
} else {
let idx = match rest.char_indices().find(|&(_, c)| c == '$' || c == '.') {
None => rest.len(),
Some((i, _)) => i,
};
writer.write_all(rest[..idx].as_bytes())?;
rest = &rest[idx..];
}
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use sys_common;
macro_rules! t { ($a:expr, $b:expr) => ({
let mut m = Vec::new();
sys_common::backtrace::demangle(&mut m, $a).unwrap();
assert_eq!(String::from_utf8(m).unwrap(), $b);
}) }
#[test]
fn demangle() {
t!("test", "test");
t!("_ZN4testE", "test");
t!("_ZN4test", "_ZN4test");
t!("_ZN4test1a2bcE", "test::a::bc");
}
#[test]
fn demangle_dollars() {
t!("_ZN4$RP$E", ")");
t!("_ZN8$RF$testE", "&test");
t!("_ZN8$BP$test4foobE", "*test::foob");
t!("_ZN9$u20$test4foobE", " test::foob");
t!("_ZN35Bar$LT$$u5b$u32$u3b$$u20$4$u5d$$GT$E", "Bar<[u32; 4]>");
}
#[test]
fn demangle_many_dollars() {
t!("_ZN13test$u20$test4foobE", "test test::foob");
t!("_ZN12test$BP$test4foobE", "test*test::foob");
}
#[test]
fn demangle_windows() {
t!("ZN4testE", "test");
t!("ZN13test$u20$test4foobE", "test test::foob");
t!("ZN12test$RF$test4foobE", "test&test::foob");
}
#[test]
fn demangle_elements_beginning_with_underscore() {
t!("_ZN13_$LT$test$GT$E", "<test>");
t!("_ZN28_$u7b$$u7b$closure$u7d$$u7d$E", "{{closure}}");
t!("_ZN15__STATIC_FMTSTRE", "__STATIC_FMTSTR");
}
#[test]
fn demangle_trait_impls() {
t!("_ZN71_$LT$Test$u20$$u2b$$u20$$u27$static$u20$as$u20$foo..Bar$LT$Test$GT$$GT$3barE",
"<Test + 'static as foo::Bar<Test>>::bar");
}
}
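// A minimal standalone sketch of the length-prefixed rule described in the
// mangling comment above `demangle`: strip the "_ZN"/"E" wrapper and split the
// remainder into length-prefixed path elements ("_ZN3foo3barE" -> ["foo", "bar"]).
// `split_segments` is a hypothetical helper, not part of the original file, and
// it ignores the `$...$` escapes that `demangle` handles.
fn split_segments(sym: &str) -> Option<Vec<&str>> {
    if !(sym.len() > 4 && sym.starts_with("_ZN") && sym.ends_with('E')) {
        return None;
    }
    let mut rest = &sym[3..sym.len() - 1];
    let mut out = Vec::new();
    while !rest.is_empty() {
        // Read the decimal length prefix...
        let digits = rest.find(|c: char| !c.is_digit(10)).unwrap_or(rest.len());
        let len: usize = match rest[..digits].parse() {
            Ok(n) => n,
            Err(_) => return None,
        };
        if digits + len > rest.len() {
            return None;
        }
        // ...then take exactly that many bytes as the path element.
        out.push(&rest[digits..digits + len]);
        rest = &rest[digits + len..];
    }
    Some(out)
}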


@@ -1,70 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use time::Duration;
use sys_common::mutex::{self, Mutex};
use sys::condvar as imp;
/// An OS-based condition variable.
///
/// This structure is the lowest layer possible on top of the OS-provided
/// condition variables. It is consequently entirely unsafe to use. It is
/// recommended to use the safer types at the top level of this crate instead of
/// this type.
pub struct Condvar(imp::Condvar);
impl Condvar {
/// Creates a new condition variable for use.
///
/// Behavior is undefined if the condition variable is moved after it is
/// first used with any of the functions below.
pub const fn new() -> Condvar { Condvar(imp::Condvar::new()) }
/// Prepares the condition variable for use.
///
/// This should be called once the condition variable is at a stable memory
/// address.
#[inline]
pub unsafe fn init(&mut self) { self.0.init() }
/// Signals one waiter on this condition variable to wake up.
#[inline]
pub unsafe fn notify_one(&self) { self.0.notify_one() }
/// Awakens all current waiters on this condition variable.
#[inline]
pub unsafe fn notify_all(&self) { self.0.notify_all() }
/// Waits for a signal on the specified mutex.
///
/// Behavior is undefined if the mutex is not locked by the current thread.
/// Behavior is also undefined if more than one mutex is used concurrently
/// on this condition variable.
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) { self.0.wait(mutex::raw(mutex)) }
/// Waits for a signal on the specified mutex with a timeout duration
/// specified by `dur` (a relative time into the future).
///
/// Behavior is undefined if the mutex is not locked by the current thread.
/// Behavior is also undefined if more than one mutex is used concurrently
/// on this condition variable.
#[inline]
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
self.0.wait_timeout(mutex::raw(mutex), dur)
}
/// Deallocates all resources associated with this condition variable.
///
/// Behavior is undefined if there are current or will be future users of
/// this condition variable.
#[inline]
pub unsafe fn destroy(&self) { self.0.destroy() }
}
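// A hedged usage sketch of the wait-in-a-loop pattern this low-level type
// underpins, written against the safe `std::sync` wrappers (Mutex + Condvar)
// rather than this unsafe type; standalone illustrative code, not part of the
// original file. The predicate re-check loop guards against spurious wakeups,
// which `wait` permits.
fn condvar_usage_sketch() {
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);
    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock().unwrap() = true;
        // Wake one waiter; compare `notify_one` above.
        cvar.notify_one();
    });
    let (lock, cvar) = &*pair;
    let mut ready = lock.lock().unwrap();
    // Re-check the predicate in a loop: spurious wakeups are allowed.
    while !*ready {
        ready = cvar.wait(ready).unwrap();
    }
}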


@@ -1,181 +0,0 @@
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use io;
use io::prelude::*;
use libc;
use sys_common::backtrace::{output, output_fileline};
pub fn print(w: &mut Write, idx: isize, addr: *mut libc::c_void,
symaddr: *mut libc::c_void) -> io::Result<()> {
use ffi::CStr;
use ptr;
////////////////////////////////////////////////////////////////////////
// libbacktrace.h API
////////////////////////////////////////////////////////////////////////
type backtrace_syminfo_callback =
extern "C" fn(data: *mut libc::c_void,
pc: libc::uintptr_t,
symname: *const libc::c_char,
symval: libc::uintptr_t,
symsize: libc::uintptr_t);
type backtrace_full_callback =
extern "C" fn(data: *mut libc::c_void,
pc: libc::uintptr_t,
filename: *const libc::c_char,
lineno: libc::c_int,
function: *const libc::c_char) -> libc::c_int;
type backtrace_error_callback =
extern "C" fn(data: *mut libc::c_void,
msg: *const libc::c_char,
errnum: libc::c_int);
enum backtrace_state {}
#[link(name = "backtrace", kind = "static")]
#[cfg(all(not(test), not(cargobuild)))]
extern {}
extern {
fn backtrace_create_state(filename: *const libc::c_char,
threaded: libc::c_int,
error: backtrace_error_callback,
data: *mut libc::c_void)
-> *mut backtrace_state;
fn backtrace_syminfo(state: *mut backtrace_state,
addr: libc::uintptr_t,
cb: backtrace_syminfo_callback,
error: backtrace_error_callback,
data: *mut libc::c_void) -> libc::c_int;
fn backtrace_pcinfo(state: *mut backtrace_state,
addr: libc::uintptr_t,
cb: backtrace_full_callback,
error: backtrace_error_callback,
data: *mut libc::c_void) -> libc::c_int;
}
////////////////////////////////////////////////////////////////////////
// helper callbacks
////////////////////////////////////////////////////////////////////////
type FileLine = (*const libc::c_char, libc::c_int);
extern fn error_cb(_data: *mut libc::c_void, _msg: *const libc::c_char,
_errnum: libc::c_int) {
// do nothing for now
}
extern fn syminfo_cb(data: *mut libc::c_void,
_pc: libc::uintptr_t,
symname: *const libc::c_char,
_symval: libc::uintptr_t,
_symsize: libc::uintptr_t) {
let slot = data as *mut *const libc::c_char;
unsafe { *slot = symname; }
}
extern fn pcinfo_cb(data: *mut libc::c_void,
_pc: libc::uintptr_t,
filename: *const libc::c_char,
lineno: libc::c_int,
_function: *const libc::c_char) -> libc::c_int {
if !filename.is_null() {
let slot = data as *mut &mut [FileLine];
let buffer = unsafe {ptr::read(slot)};
// if the buffer is not full, add file:line to the buffer
// and adjust the buffer for next possible calls to pcinfo_cb.
if !buffer.is_empty() {
buffer[0] = (filename, lineno);
unsafe { ptr::write(slot, &mut buffer[1..]); }
}
}
0
}
// The libbacktrace API supports creating a state, but it does not
// support destroying a state. I personally take this to mean that a
// state is meant to be created and then live forever.
//
// I would love to register an at_exit() handler which cleans up this
// state, but libbacktrace provides no way to do so.
//
// With these constraints, this function has a statically cached state
// that is calculated the first time this is requested. Remember that
// backtracing all happens serially (one global lock).
//
// Things don't work so well on non-Linux platforms since libbacktrace can't
// track down which executable this is. We at one point used env::current_exe but
// it turns out that there are some serious security issues with that
// approach.
//
// Specifically, on certain platforms like BSDs, a malicious actor can cause
// an arbitrary file to be placed at the path returned by current_exe.
// libbacktrace does not behave defensively in the presence of ill-formed
// DWARF information, and has been demonstrated to segfault in at least one
// case. There is no evidence at the moment to suggest that a more carefully
// constructed file can't cause arbitrary code execution. As a result of all
// of this, we don't hint libbacktrace with the path to the current process.
unsafe fn init_state() -> *mut backtrace_state {
static mut STATE: *mut backtrace_state = ptr::null_mut();
if !STATE.is_null() { return STATE }
STATE = backtrace_create_state(ptr::null(), 0, error_cb,
ptr::null_mut());
STATE
}
////////////////////////////////////////////////////////////////////////
// translation
////////////////////////////////////////////////////////////////////////
// backtrace errors are currently swept under the rug, only I/O
// errors are reported
let state = unsafe { init_state() };
if state.is_null() {
return output(w, idx, addr, None)
}
let mut data = ptr::null();
let data_addr = &mut data as *mut *const libc::c_char;
let ret = unsafe {
backtrace_syminfo(state, symaddr as libc::uintptr_t,
syminfo_cb, error_cb,
data_addr as *mut libc::c_void)
};
if ret == 0 || data.is_null() {
output(w, idx, addr, None)?;
} else {
output(w, idx, addr, Some(unsafe { CStr::from_ptr(data).to_bytes() }))?;
}
// pcinfo may return an arbitrary number of file:line pairs,
// in the order of stack trace (i.e. inlined calls first).
// in order to avoid allocation, we stack-allocate a fixed size of entries.
const FILELINE_SIZE: usize = 32;
let mut fileline_buf = [(ptr::null(), -1); FILELINE_SIZE];
let ret;
let fileline_count;
{
let mut fileline_win: &mut [FileLine] = &mut fileline_buf;
let fileline_addr = &mut fileline_win as *mut &mut [FileLine];
ret = unsafe {
backtrace_pcinfo(state, addr as libc::uintptr_t,
pcinfo_cb, error_cb,
fileline_addr as *mut libc::c_void)
};
fileline_count = FILELINE_SIZE - fileline_win.len();
}
if ret == 0 {
for (i, &(file, line)) in fileline_buf[..fileline_count].iter().enumerate() {
if file.is_null() { continue; } // just to be sure
let file = unsafe { CStr::from_ptr(file).to_bytes() };
output_fileline(w, file, line, i == FILELINE_SIZE - 1)?;
}
}
Ok(())
}


@@ -1,15 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
pub mod libbacktrace;


@@ -1,177 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use io;
use io::ErrorKind;
use io::Read;
use slice::from_raw_parts_mut;
// Provides read_to_end functionality over an uninitialized buffer.
// This function is unsafe because it calls the underlying
// read function with a slice into uninitialized memory. The default
// implementation of read_to_end for readers will zero out new memory in
// the buf before passing it to read, but avoiding this zero can often
// lead to a fairly significant performance win.
//
// Implementations using this method have to adhere to two guarantees:
// * The implementation of read never reads the buffer provided.
// * The implementation of read correctly reports how many bytes were written.
pub unsafe fn read_to_end_uninitialized(r: &mut Read, buf: &mut Vec<u8>) -> io::Result<usize> {
let start_len = buf.len();
buf.reserve(16);
// Always try to read into the empty space of the vector (from the length to the capacity).
// If the vector ever fills up then we reserve an extra byte which should trigger the normal
// reallocation routines for the vector, which will likely double the size.
//
// This function is similar to the read_to_end function in std::io, but the logic about
// reservations and slicing is different enough that this is duplicated here.
loop {
if buf.len() == buf.capacity() {
buf.reserve(1);
}
let buf_slice = from_raw_parts_mut(buf.as_mut_ptr().offset(buf.len() as isize),
buf.capacity() - buf.len());
match r.read(buf_slice) {
Ok(0) => { return Ok(buf.len() - start_len); }
Ok(n) => { let len = buf.len() + n; buf.set_len(len); },
Err(ref e) if e.kind() == ErrorKind::Interrupted => { }
Err(e) => { return Err(e); }
}
}
}
#[cfg(test)]
#[allow(dead_code)] // not used on emscripten
pub mod test {
use path::{Path, PathBuf};
use env;
use rand::{self, Rng};
use fs;
pub struct TempDir(PathBuf);
impl TempDir {
pub fn join(&self, path: &str) -> PathBuf {
let TempDir(ref p) = *self;
p.join(path)
}
pub fn path<'a>(&'a self) -> &'a Path {
let TempDir(ref p) = *self;
p
}
}
impl Drop for TempDir {
fn drop(&mut self) {
// Gee, seeing how we're testing the fs module I sure hope that we
// at least implement this correctly!
let TempDir(ref p) = *self;
fs::remove_dir_all(p).unwrap();
}
}
pub fn tmpdir() -> TempDir {
let p = env::temp_dir();
let mut r = rand::thread_rng();
let ret = p.join(&format!("rust-{}", r.next_u32()));
fs::create_dir(&ret).unwrap();
TempDir(ret)
}
}
#[cfg(test)]
mod tests {
use io::prelude::*;
use super::*;
use io;
use io::{ErrorKind, Take, Repeat, repeat};
use slice::from_raw_parts;
struct ErrorRepeat {
lr: Take<Repeat>
}
fn error_repeat(byte: u8, limit: u64) -> ErrorRepeat {
ErrorRepeat { lr: repeat(byte).take(limit) }
}
impl Read for ErrorRepeat {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let ret = self.lr.read(buf);
if let Ok(0) = ret {
return Err(io::Error::new(ErrorKind::Other, ""))
}
ret
}
}
fn init_vec_data() -> Vec<u8> {
let mut vec = vec![10u8; 200];
unsafe { vec.set_len(0); }
vec
}
fn assert_all_eq(buf: &[u8], value: u8) {
for n in buf {
assert_eq!(*n, value);
}
}
fn validate(buf: &Vec<u8>, good_read_len: usize) {
assert_all_eq(buf, 1u8);
let cap = buf.capacity();
let end_slice = unsafe { from_raw_parts(buf.as_ptr().offset(good_read_len as isize),
cap - good_read_len) };
assert_all_eq(end_slice, 10u8);
}
#[test]
fn read_to_end_uninit_error() {
let mut er = error_repeat(1,100);
let mut vec = init_vec_data();
if let Err(_) = unsafe { read_to_end_uninitialized(&mut er, &mut vec) } {
validate(&vec, 100);
} else {
assert!(false);
}
}
#[test]
fn read_to_end_uninit_zero_len_vec() {
let mut er = repeat(1).take(100);
let mut vec = Vec::new();
let n = unsafe{ read_to_end_uninitialized(&mut er, &mut vec).unwrap() };
assert_all_eq(&vec, 1u8);
assert_eq!(vec.len(), n);
}
#[test]
fn read_to_end_uninit_good() {
let mut er = repeat(1).take(100);
let mut vec = init_vec_data();
let n = unsafe{ read_to_end_uninitialized(&mut er, &mut vec).unwrap() };
validate(&vec, 100);
assert_eq!(vec.len(), n);
}
#[bench]
#[cfg_attr(target_os = "emscripten", ignore)]
fn bench_uninitialized(b: &mut ::test::Bencher) {
b.iter(|| {
let mut lr = repeat(1).take(10000000);
let mut vec = Vec::with_capacity(1024);
unsafe { read_to_end_uninitialized(&mut lr, &mut vec) }
});
}
}
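// A hedged test-style sketch, not part of the original file: io::Cursor upholds
// both guarantees listed above -- it never reads from the buffer it is given and
// it reports the written length exactly -- so it is a valid reader for
// read_to_end_uninitialized.
#[test]
fn read_to_end_uninit_cursor_sketch() {
    let data = [1u8, 2, 3, 4];
    let mut reader = io::Cursor::new(&data[..]);
    let mut out = Vec::new();
    let n = unsafe { read_to_end_uninitialized(&mut reader, &mut out).unwrap() };
    assert_eq!(n, 4);
    assert_eq!(&out[..], &data[..]);
}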


@@ -1,230 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// Original implementation taken from rust-memchr
// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
#[allow(dead_code)]
pub mod fallback {
use cmp;
use mem;
const LO_U64: u64 = 0x0101010101010101;
const HI_U64: u64 = 0x8080808080808080;
// use truncation
const LO_USIZE: usize = LO_U64 as usize;
const HI_USIZE: usize = HI_U64 as usize;
/// Return `true` if `x` contains any zero byte.
///
/// From *Matters Computational*, J. Arndt
///
/// "The idea is to subtract one from each of the bytes and then look for
/// bytes where the borrow propagated all the way to the most significant
/// bit."
#[inline]
fn contains_zero_byte(x: usize) -> bool {
x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
#[cfg(target_pointer_width = "32")]
#[inline]
fn repeat_byte(b: u8) -> usize {
let mut rep = (b as usize) << 8 | b as usize;
rep = rep << 16 | rep;
rep
}
#[cfg(target_pointer_width = "64")]
#[inline]
fn repeat_byte(b: u8) -> usize {
let mut rep = (b as usize) << 8 | b as usize;
rep = rep << 16 | rep;
rep = rep << 32 | rep;
rep
}
/// Return the first index matching the byte `x` in `text`.
pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
// Scan for a single byte value by reading two `usize` words at a time.
//
// Split `text` in three parts
// - unaligned initial part, before the first word aligned address in text
// - body, scan by 2 words at a time
// - the last remaining part, < 2 word size
let len = text.len();
let ptr = text.as_ptr();
let usize_bytes = mem::size_of::<usize>();
// search up to an aligned boundary
let align = (ptr as usize) & (usize_bytes - 1);
let mut offset;
if align > 0 {
offset = cmp::min(usize_bytes - align, len);
if let Some(index) = text[..offset].iter().position(|elt| *elt == x) {
return Some(index);
}
} else {
offset = 0;
}
// search the body of the text
let repeated_x = repeat_byte(x);
if len >= 2 * usize_bytes {
while offset <= len - 2 * usize_bytes {
unsafe {
let u = *(ptr.offset(offset as isize) as *const usize);
let v = *(ptr.offset((offset + usize_bytes) as isize) as *const usize);
// break if there is a matching byte
let zu = contains_zero_byte(u ^ repeated_x);
let zv = contains_zero_byte(v ^ repeated_x);
if zu || zv {
break;
}
}
offset += usize_bytes * 2;
}
}
// find the byte after the point the body loop stopped
text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i)
}
/// Return the last index matching the byte `x` in `text`.
pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
// Scan for a single byte value by reading two `usize` words at a time.
//
// Split `text` in three parts
// - unaligned tail, after the last word aligned address in text
// - body, scan by 2 words at a time
// - the first remaining bytes, < 2 word size
let len = text.len();
let ptr = text.as_ptr();
let usize_bytes = mem::size_of::<usize>();
// search to an aligned boundary
let end_align = (ptr as usize + len) & (usize_bytes - 1);
let mut offset;
if end_align > 0 {
offset = if end_align >= len { 0 } else { len - end_align };
if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
return Some(offset + index);
}
} else {
offset = len;
}
// search the body of the text
let repeated_x = repeat_byte(x);
while offset >= 2 * usize_bytes {
unsafe {
let u = *(ptr.offset(offset as isize - 2 * usize_bytes as isize) as *const usize);
let v = *(ptr.offset(offset as isize - usize_bytes as isize) as *const usize);
// break if there is a matching byte
let zu = contains_zero_byte(u ^ repeated_x);
let zv = contains_zero_byte(v ^ repeated_x);
if zu || zv {
break;
}
}
offset -= 2 * usize_bytes;
}
// find the byte before the point the body loop stopped
text[..offset].iter().rposition(|elt| *elt == x)
}
// test fallback implementations on all platforms
#[test]
fn matches_one() {
assert_eq!(Some(0), memchr(b'a', b"a"));
}
#[test]
fn matches_begin() {
assert_eq!(Some(0), memchr(b'a', b"aaaa"));
}
#[test]
fn matches_end() {
assert_eq!(Some(4), memchr(b'z', b"aaaaz"));
}
#[test]
fn matches_nul() {
assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00"));
}
#[test]
fn matches_past_nul() {
assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z"));
}
#[test]
fn no_match_empty() {
assert_eq!(None, memchr(b'a', b""));
}
#[test]
fn no_match() {
assert_eq!(None, memchr(b'a', b"xyz"));
}
#[test]
fn matches_one_reversed() {
assert_eq!(Some(0), memrchr(b'a', b"a"));
}
#[test]
fn matches_begin_reversed() {
assert_eq!(Some(3), memrchr(b'a', b"aaaa"));
}
#[test]
fn matches_end_reversed() {
assert_eq!(Some(0), memrchr(b'z', b"zaaaa"));
}
#[test]
fn matches_nul_reversed() {
assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00"));
}
#[test]
fn matches_past_nul_reversed() {
assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa"));
}
#[test]
fn no_match_empty_reversed() {
assert_eq!(None, memrchr(b'a', b""));
}
#[test]
fn no_match_reversed() {
assert_eq!(None, memrchr(b'a', b"xyz"));
}
#[test]
fn each_alignment_reversed() {
let mut data = [1u8; 64];
let needle = 2;
let pos = 40;
data[pos] = needle;
for start in 0..16 {
assert_eq!(Some(pos - start), memrchr(needle, &data[start..]));
}
}
}
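// A worked example of the byte-broadcast trick used above, as a standalone
// test-style sketch (64-bit values for concreteness; not part of the original
// file). XOR-ing a haystack word with the broadcast needle turns every matching
// byte into 0x00, which the `contains_zero_byte` formula then detects.
#[test]
fn zero_byte_trick_sketch() {
    let needle = 0x6161616161616161u64;   // b'a' repeated, i.e. repeat_byte(b'a')
    let hay_hit = 0x6161616162616161u64;  // seven b'a' bytes and one b'b'
    let hay_miss = 0x6262626262626262u64; // all b'b'
    // A byte of (word ^ needle) is zero exactly where the bytes matched.
    let zero = |x: u64| x.wrapping_sub(0x0101010101010101) & !x & 0x8080808080808080 != 0;
    assert!(zero(hay_hit ^ needle));
    assert!(!zero(hay_miss ^ needle));
}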


@@ -1,122 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_docs)]
use sync::Once;
use sys;
macro_rules! rtabort {
($($t:tt)*) => (::sys_common::util::abort(format_args!($($t)*)))
}
macro_rules! rtassert {
($e:expr) => ({
if !$e {
rtabort!(concat!("assertion failed: ", stringify!($e)))
}
})
}
pub mod at_exit_imp;
#[cfg(any(not(cargobuild), feature = "backtrace"))]
pub mod backtrace;
pub mod condvar;
pub mod io;
pub mod memchr;
pub mod mutex;
pub mod poison;
pub mod remutex;
pub mod rwlock;
pub mod thread;
pub mod thread_info;
pub mod thread_local;
pub mod util;
pub mod wtf8;
#[cfg(redox)]
pub use sys::net;
#[cfg(not(redox))]
pub mod net;
#[cfg(any(not(cargobuild), feature = "backtrace"))]
#[cfg(any(all(unix, not(any(target_os = "macos", target_os = "ios", target_os = "emscripten"))),
all(windows, target_env = "gnu")))]
pub mod gnu;
// common error constructors
/// A trait for viewing representations from std types
#[doc(hidden)]
pub trait AsInner<Inner: ?Sized> {
fn as_inner(&self) -> &Inner;
}
/// A trait for viewing representations from std types
#[doc(hidden)]
pub trait AsInnerMut<Inner: ?Sized> {
fn as_inner_mut(&mut self) -> &mut Inner;
}
/// A trait for extracting representations from std types
#[doc(hidden)]
pub trait IntoInner<Inner> {
fn into_inner(self) -> Inner;
}
/// A trait for creating std types from internal representations
#[doc(hidden)]
pub trait FromInner<Inner> {
fn from_inner(inner: Inner) -> Self;
}
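// A hedged sketch of how these conversion traits are typically implemented for a
// std wrapper around a platform handle. `NativeHandle` and `Wrapper` are
// hypothetical names for illustration, not types from this crate.
struct NativeHandle(i32);
struct Wrapper { inner: NativeHandle }

impl AsInner<NativeHandle> for Wrapper {
    fn as_inner(&self) -> &NativeHandle { &self.inner }
}
impl IntoInner<NativeHandle> for Wrapper {
    fn into_inner(self) -> NativeHandle { self.inner }
}
impl FromInner<NativeHandle> for Wrapper {
    fn from_inner(inner: NativeHandle) -> Wrapper { Wrapper { inner: inner } }
}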
/// Enqueues a procedure to run when the main thread exits.
///
/// Currently these closures are only run once the main *Rust* thread exits.
/// Once the `at_exit` handlers begin running, more may be enqueued, but not
/// infinitely so. Eventually a handler registration will be forced to fail.
///
/// Returns `Ok` if the handler was successfully registered, meaning that the
/// closure will be run once the main thread exits. Returns `Err` to indicate
/// that the closure could not be registered, meaning that it is not scheduled
/// to be run.
pub fn at_exit<F: FnOnce() + Send + 'static>(f: F) -> Result<(), ()> {
if at_exit_imp::push(Box::new(f)) {Ok(())} else {Err(())}
}
/// One-time runtime cleanup.
pub fn cleanup() {
static CLEANUP: Once = Once::new();
CLEANUP.call_once(|| unsafe {
sys::args::cleanup();
sys::stack_overflow::cleanup();
at_exit_imp::cleanup();
});
}
// Computes (value*numer)/denom without overflow, as long as both
// (numer*denom) and the overall result fit into i64 (which is the case
// for our time conversions).
#[allow(dead_code)] // not used on all platforms
pub fn mul_div_u64(value: u64, numer: u64, denom: u64) -> u64 {
let q = value / denom;
let r = value % denom;
// Decompose value as (value/denom*denom + value%denom),
// substitute into (value*numer)/denom and simplify.
// r < denom, so (denom*numer) is the upper bound of (r*numer)
q * numer + r * numer / denom
}
#[test]
fn test_muldiv() {
assert_eq!(mul_div_u64( 1_000_000_000_001, 1_000_000_000, 1_000_000),
1_000_000_000_001_000);
}


@@ -1,66 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use sys::mutex as imp;
/// An OS-based mutual exclusion lock.
///
/// This is the thinnest cross-platform wrapper around OS mutexes. All usage of
/// this mutex is unsafe, and it is recommended to use the safe wrapper at the
/// top level of the crate instead of this type.
pub struct Mutex(imp::Mutex);
unsafe impl Sync for Mutex {}
impl Mutex {
/// Creates a new mutex for use.
///
/// Behavior is undefined if the mutex is moved after it is
/// first used with any of the functions below.
pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) }
/// Prepare the mutex for use.
///
/// This should be called once the mutex is at a stable memory address.
#[inline]
pub unsafe fn init(&mut self) { self.0.init() }
/// Locks the mutex blocking the current thread until it is available.
///
/// Behavior is undefined if the mutex has been moved between this and any
/// previous function call.
#[inline]
pub unsafe fn lock(&self) { self.0.lock() }
/// Attempts to lock the mutex without blocking, returning whether it was
/// successfully acquired or not.
///
/// Behavior is undefined if the mutex has been moved between this and any
/// previous function call.
#[inline]
pub unsafe fn try_lock(&self) -> bool { self.0.try_lock() }
/// Unlocks the mutex.
///
/// Behavior is undefined if the current thread does not actually hold the
/// mutex.
#[inline]
pub unsafe fn unlock(&self) { self.0.unlock() }
/// Deallocates all resources associated with this mutex.
///
/// Behavior is undefined if there are current or will be future users of
/// this mutex.
#[inline]
pub unsafe fn destroy(&self) { self.0.destroy() }
}
// not meant to be exported to the outside world, just the containing module
pub fn raw(mutex: &Mutex) -> &imp::Mutex { &mutex.0 }
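// A hedged sketch of the intended call sequence for this raw type (illustrative
// only; real callers live behind the safe `std::sync::Mutex` wrapper). The mutex
// must already sit at its final, stable address before `init`, and every `lock`
// must be paired with an `unlock` on the same thread.
unsafe fn raw_mutex_usage_sketch(m: &mut Mutex) {
    m.init();     // once, before any lock, at a stable address
    m.lock();
    // ... critical section ...
    m.unlock();
    m.destroy();  // only once no current or future users remain
}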


@@ -1,633 +0,0 @@
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use cmp;
use ffi::CString;
use fmt;
use io::{self, Error, ErrorKind};
use libc::{c_int, c_void};
use mem;
use net::{SocketAddr, Shutdown, Ipv4Addr, Ipv6Addr};
use ptr;
use sys::net::{cvt, cvt_r, cvt_gai, Socket, init, wrlen_t};
use sys::net::netc as c;
use sys_common::{AsInner, FromInner, IntoInner};
use time::Duration;
#[cfg(any(target_os = "dragonfly", target_os = "freebsd",
target_os = "ios", target_os = "macos",
target_os = "openbsd", target_os = "netbsd",
target_os = "solaris", target_os = "haiku"))]
use sys::net::netc::IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP;
#[cfg(not(any(target_os = "dragonfly", target_os = "freebsd",
target_os = "ios", target_os = "macos",
target_os = "openbsd", target_os = "netbsd",
target_os = "solaris", target_os = "haiku")))]
use sys::net::netc::IPV6_ADD_MEMBERSHIP;
#[cfg(any(target_os = "dragonfly", target_os = "freebsd",
target_os = "ios", target_os = "macos",
target_os = "openbsd", target_os = "netbsd",
target_os = "solaris", target_os = "haiku"))]
use sys::net::netc::IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP;
#[cfg(not(any(target_os = "dragonfly", target_os = "freebsd",
target_os = "ios", target_os = "macos",
target_os = "openbsd", target_os = "netbsd",
target_os = "solaris", target_os = "haiku")))]
use sys::net::netc::IPV6_DROP_MEMBERSHIP;
#[cfg(any(target_os = "linux", target_os = "android",
target_os = "dragonfly", target_os = "freebsd",
target_os = "openbsd", target_os = "netbsd",
target_os = "haiku", target_os = "bitrig"))]
use libc::MSG_NOSIGNAL;
#[cfg(not(any(target_os = "linux", target_os = "android",
target_os = "dragonfly", target_os = "freebsd",
target_os = "openbsd", target_os = "netbsd",
target_os = "haiku", target_os = "bitrig")))]
const MSG_NOSIGNAL: c_int = 0x0;
////////////////////////////////////////////////////////////////////////////////
// sockaddr and misc bindings
////////////////////////////////////////////////////////////////////////////////
pub fn setsockopt<T>(sock: &Socket, opt: c_int, val: c_int,
payload: T) -> io::Result<()> {
unsafe {
let payload = &payload as *const T as *const c_void;
cvt(c::setsockopt(*sock.as_inner(), opt, val, payload,
mem::size_of::<T>() as c::socklen_t))?;
Ok(())
}
}
pub fn getsockopt<T: Copy>(sock: &Socket, opt: c_int,
val: c_int) -> io::Result<T> {
unsafe {
let mut slot: T = mem::zeroed();
let mut len = mem::size_of::<T>() as c::socklen_t;
cvt(c::getsockopt(*sock.as_inner(), opt, val,
&mut slot as *mut _ as *mut _,
&mut len))?;
assert_eq!(len as usize, mem::size_of::<T>());
Ok(slot)
}
}
fn sockname<F>(f: F) -> io::Result<SocketAddr>
where F: FnOnce(*mut c::sockaddr, *mut c::socklen_t) -> c_int
{
unsafe {
let mut storage: c::sockaddr_storage = mem::zeroed();
let mut len = mem::size_of_val(&storage) as c::socklen_t;
cvt(f(&mut storage as *mut _ as *mut _, &mut len))?;
sockaddr_to_addr(&storage, len as usize)
}
}
fn sockaddr_to_addr(storage: &c::sockaddr_storage,
len: usize) -> io::Result<SocketAddr> {
match storage.ss_family as c_int {
c::AF_INET => {
assert!(len as usize >= mem::size_of::<c::sockaddr_in>());
Ok(SocketAddr::V4(FromInner::from_inner(unsafe {
*(storage as *const _ as *const c::sockaddr_in)
})))
}
c::AF_INET6 => {
assert!(len as usize >= mem::size_of::<c::sockaddr_in6>());
Ok(SocketAddr::V6(FromInner::from_inner(unsafe {
*(storage as *const _ as *const c::sockaddr_in6)
})))
}
_ => {
Err(Error::new(ErrorKind::InvalidInput, "invalid argument"))
}
}
}
#[cfg(target_os = "android")]
fn to_ipv6mr_interface(value: u32) -> c_int {
value as c_int
}
#[cfg(not(target_os = "android"))]
fn to_ipv6mr_interface(value: u32) -> ::libc::c_uint {
value as ::libc::c_uint
}
////////////////////////////////////////////////////////////////////////////////
// get_host_addresses
////////////////////////////////////////////////////////////////////////////////
pub struct LookupHost {
original: *mut c::addrinfo,
cur: *mut c::addrinfo,
}
impl Iterator for LookupHost {
type Item = SocketAddr;
fn next(&mut self) -> Option<SocketAddr> {
loop {
unsafe {
let cur = match self.cur.as_ref() {
None => return None,
Some(c) => c,
};
self.cur = cur.ai_next;
match sockaddr_to_addr(mem::transmute(cur.ai_addr),
cur.ai_addrlen as usize)
{
Ok(addr) => return Some(addr),
Err(_) => continue,
}
}
}
}
}
unsafe impl Sync for LookupHost {}
unsafe impl Send for LookupHost {}
impl Drop for LookupHost {
fn drop(&mut self) {
unsafe { c::freeaddrinfo(self.original) }
}
}
pub fn lookup_host(host: &str) -> io::Result<LookupHost> {
init();
let c_host = CString::new(host)?;
let hints = c::addrinfo {
ai_flags: 0,
ai_family: 0,
ai_socktype: c::SOCK_STREAM,
ai_protocol: 0,
ai_addrlen: 0,
ai_addr: ptr::null_mut(),
ai_canonname: ptr::null_mut(),
ai_next: ptr::null_mut()
};
let mut res = ptr::null_mut();
unsafe {
cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints,
&mut res))?;
Ok(LookupHost { original: res, cur: res })
}
}
////////////////////////////////////////////////////////////////////////////////
// TCP streams
////////////////////////////////////////////////////////////////////////////////
pub struct TcpStream {
inner: Socket,
}
impl TcpStream {
pub fn connect(addr: &SocketAddr) -> io::Result<TcpStream> {
init();
let sock = Socket::new(addr, c::SOCK_STREAM)?;
let (addrp, len) = addr.into_inner();
cvt_r(|| unsafe { c::connect(*sock.as_inner(), addrp, len) })?;
Ok(TcpStream { inner: sock })
}
pub fn socket(&self) -> &Socket { &self.inner }
pub fn into_socket(self) -> Socket { self.inner }
pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
self.inner.set_timeout(dur, c::SO_RCVTIMEO)
}
pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
self.inner.set_timeout(dur, c::SO_SNDTIMEO)
}
pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
self.inner.timeout(c::SO_RCVTIMEO)
}
pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
self.inner.timeout(c::SO_SNDTIMEO)
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
self.inner.read_to_end(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
let ret = cvt(unsafe {
c::send(*self.inner.as_inner(),
buf.as_ptr() as *const c_void,
len,
MSG_NOSIGNAL)
})?;
Ok(ret as usize)
}
pub fn peer_addr(&self) -> io::Result<SocketAddr> {
sockname(|buf, len| unsafe {
c::getpeername(*self.inner.as_inner(), buf, len)
})
}
pub fn socket_addr(&self) -> io::Result<SocketAddr> {
sockname(|buf, len| unsafe {
c::getsockname(*self.inner.as_inner(), buf, len)
})
}
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
self.inner.shutdown(how)
}
pub fn duplicate(&self) -> io::Result<TcpStream> {
self.inner.duplicate().map(|s| TcpStream { inner: s })
}
pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
self.inner.set_nodelay(nodelay)
}
pub fn nodelay(&self) -> io::Result<bool> {
self.inner.nodelay()
}
pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
setsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL, ttl as c_int)
}
pub fn ttl(&self) -> io::Result<u32> {
let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL)?;
Ok(raw as u32)
}
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
self.inner.take_error()
}
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
self.inner.set_nonblocking(nonblocking)
}
}
impl FromInner<Socket> for TcpStream {
fn from_inner(socket: Socket) -> TcpStream {
TcpStream { inner: socket }
}
}
impl fmt::Debug for TcpStream {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut res = f.debug_struct("TcpStream");
if let Ok(addr) = self.socket_addr() {
res.field("addr", &addr);
}
if let Ok(peer) = self.peer_addr() {
res.field("peer", &peer);
}
let name = if cfg!(windows) {"socket"} else {"fd"};
res.field(name, &self.inner.as_inner())
.finish()
}
}
////////////////////////////////////////////////////////////////////////////////
// TCP listeners
////////////////////////////////////////////////////////////////////////////////
pub struct TcpListener {
inner: Socket,
}
impl TcpListener {
pub fn bind(addr: &SocketAddr) -> io::Result<TcpListener> {
init();
let sock = Socket::new(addr, c::SOCK_STREAM)?;
// On platforms with Berkeley-derived sockets, this allows a socket to be
// rebound quickly, without needing to wait for the OS to clean up the
// previous one.
if !cfg!(windows) {
setsockopt(&sock, c::SOL_SOCKET, c::SO_REUSEADDR,
1 as c_int)?;
}
// Bind our new socket
let (addrp, len) = addr.into_inner();
cvt(unsafe { c::bind(*sock.as_inner(), addrp, len) })?;
// Start listening
cvt(unsafe { c::listen(*sock.as_inner(), 128) })?;
Ok(TcpListener { inner: sock })
}
pub fn socket(&self) -> &Socket { &self.inner }
pub fn into_socket(self) -> Socket { self.inner }
pub fn socket_addr(&self) -> io::Result<SocketAddr> {
sockname(|buf, len| unsafe {
c::getsockname(*self.inner.as_inner(), buf, len)
})
}
pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
let mut storage: c::sockaddr_storage = unsafe { mem::zeroed() };
let mut len = mem::size_of_val(&storage) as c::socklen_t;
let sock = self.inner.accept(&mut storage as *mut _ as *mut _,
&mut len)?;
let addr = sockaddr_to_addr(&storage, len as usize)?;
Ok((TcpStream { inner: sock, }, addr))
}
pub fn duplicate(&self) -> io::Result<TcpListener> {
self.inner.duplicate().map(|s| TcpListener { inner: s })
}
pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
setsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL, ttl as c_int)
}
pub fn ttl(&self) -> io::Result<u32> {
let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL)?;
Ok(raw as u32)
}
pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
setsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_V6ONLY, only_v6 as c_int)
}
pub fn only_v6(&self) -> io::Result<bool> {
let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_V6ONLY)?;
Ok(raw != 0)
}
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
self.inner.take_error()
}
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
self.inner.set_nonblocking(nonblocking)
}
}
impl FromInner<Socket> for TcpListener {
fn from_inner(socket: Socket) -> TcpListener {
TcpListener { inner: socket }
}
}
impl fmt::Debug for TcpListener {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut res = f.debug_struct("TcpListener");
if let Ok(addr) = self.socket_addr() {
res.field("addr", &addr);
}
let name = if cfg!(windows) {"socket"} else {"fd"};
res.field(name, &self.inner.as_inner())
.finish()
}
}
////////////////////////////////////////////////////////////////////////////////
// UDP
////////////////////////////////////////////////////////////////////////////////
pub struct UdpSocket {
inner: Socket,
}
impl UdpSocket {
pub fn bind(addr: &SocketAddr) -> io::Result<UdpSocket> {
init();
let sock = Socket::new(addr, c::SOCK_DGRAM)?;
let (addrp, len) = addr.into_inner();
cvt(unsafe { c::bind(*sock.as_inner(), addrp, len) })?;
Ok(UdpSocket { inner: sock })
}
pub fn socket(&self) -> &Socket { &self.inner }
pub fn into_socket(self) -> Socket { self.inner }
pub fn socket_addr(&self) -> io::Result<SocketAddr> {
sockname(|buf, len| unsafe {
c::getsockname(*self.inner.as_inner(), buf, len)
})
}
pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
let mut storage: c::sockaddr_storage = unsafe { mem::zeroed() };
let mut addrlen = mem::size_of_val(&storage) as c::socklen_t;
let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
let n = cvt(unsafe {
c::recvfrom(*self.inner.as_inner(),
buf.as_mut_ptr() as *mut c_void,
len, 0,
&mut storage as *mut _ as *mut _, &mut addrlen)
})?;
Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?))
}
pub fn send_to(&self, buf: &[u8], dst: &SocketAddr) -> io::Result<usize> {
let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
let (dstp, dstlen) = dst.into_inner();
let ret = cvt(unsafe {
c::sendto(*self.inner.as_inner(),
buf.as_ptr() as *const c_void, len,
MSG_NOSIGNAL, dstp, dstlen)
})?;
Ok(ret as usize)
}
pub fn duplicate(&self) -> io::Result<UdpSocket> {
self.inner.duplicate().map(|s| UdpSocket { inner: s })
}
pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
self.inner.set_timeout(dur, c::SO_RCVTIMEO)
}
pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
self.inner.set_timeout(dur, c::SO_SNDTIMEO)
}
pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
self.inner.timeout(c::SO_RCVTIMEO)
}
pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
self.inner.timeout(c::SO_SNDTIMEO)
}
pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> {
setsockopt(&self.inner, c::SOL_SOCKET, c::SO_BROADCAST, broadcast as c_int)
}
pub fn broadcast(&self) -> io::Result<bool> {
let raw: c_int = getsockopt(&self.inner, c::SOL_SOCKET, c::SO_BROADCAST)?;
Ok(raw != 0)
}
pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> {
setsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_LOOP, multicast_loop_v4 as c_int)
}
pub fn multicast_loop_v4(&self) -> io::Result<bool> {
let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_LOOP)?;
Ok(raw != 0)
}
pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> {
setsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_TTL, multicast_ttl_v4 as c_int)
}
pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_TTL)?;
Ok(raw as u32)
}
pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> {
setsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_MULTICAST_LOOP, multicast_loop_v6 as c_int)
}
pub fn multicast_loop_v6(&self) -> io::Result<bool> {
let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_MULTICAST_LOOP)?;
Ok(raw != 0)
}
pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr)
-> io::Result<()> {
let mreq = c::ip_mreq {
imr_multiaddr: *multiaddr.as_inner(),
imr_interface: *interface.as_inner(),
};
setsockopt(&self.inner, c::IPPROTO_IP, c::IP_ADD_MEMBERSHIP, mreq)
}
pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32)
-> io::Result<()> {
let mreq = c::ipv6_mreq {
ipv6mr_multiaddr: *multiaddr.as_inner(),
ipv6mr_interface: to_ipv6mr_interface(interface),
};
setsockopt(&self.inner, c::IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, mreq)
}
pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr)
-> io::Result<()> {
let mreq = c::ip_mreq {
imr_multiaddr: *multiaddr.as_inner(),
imr_interface: *interface.as_inner(),
};
setsockopt(&self.inner, c::IPPROTO_IP, c::IP_DROP_MEMBERSHIP, mreq)
}
pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32)
-> io::Result<()> {
let mreq = c::ipv6_mreq {
ipv6mr_multiaddr: *multiaddr.as_inner(),
ipv6mr_interface: to_ipv6mr_interface(interface),
};
setsockopt(&self.inner, c::IPPROTO_IPV6, IPV6_DROP_MEMBERSHIP, mreq)
}
pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
setsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL, ttl as c_int)
}
pub fn ttl(&self) -> io::Result<u32> {
let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL)?;
Ok(raw as u32)
}
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
self.inner.take_error()
}
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
self.inner.set_nonblocking(nonblocking)
}
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
let ret = cvt(unsafe {
c::send(*self.inner.as_inner(),
buf.as_ptr() as *const c_void,
len,
MSG_NOSIGNAL)
})?;
Ok(ret as usize)
}
pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> {
let (addrp, len) = addr.into_inner();
cvt_r(|| unsafe { c::connect(*self.inner.as_inner(), addrp, len) }).map(|_| ())
}
}
impl FromInner<Socket> for UdpSocket {
fn from_inner(socket: Socket) -> UdpSocket {
UdpSocket { inner: socket }
}
}
impl fmt::Debug for UdpSocket {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut res = f.debug_struct("UdpSocket");
if let Ok(addr) = self.socket_addr() {
res.field("addr", &addr);
}
let name = if cfg!(windows) {"socket"} else {"fd"};
res.field(name, &self.inner.as_inner())
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use collections::HashMap;
#[test]
fn no_lookup_host_duplicates() {
let mut addrs = HashMap::new();
let lh = match lookup_host("localhost") {
Ok(lh) => lh,
Err(e) => panic!("couldn't resolve `localhost': {}", e)
};
let _na = lh.map(|sa| *addrs.entry(sa).or_insert(0) += 1).count();
assert!(addrs.values().filter(|&&v| v > 1).count() == 0);
}
}


@@ -1,199 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use error::{Error};
use fmt;
use sync::atomic::{AtomicBool, Ordering};
use thread;
pub struct Flag { failed: AtomicBool }
// Note that the Ordering used to access the `failed` field of `Flag` below is
// always `Relaxed`, and that's because this isn't actually protecting any data,
// it's just a flag whether we've panicked or not.
//
// The actual location that this matters is when a mutex is **locked** which is
// where we have external synchronization ensuring that we see memory
// reads/writes to this flag.
//
// As a result, if it matters, we should see the correct value for `failed` in
// all cases.
impl Flag {
pub const fn new() -> Flag {
Flag { failed: AtomicBool::new(false) }
}
#[inline]
pub fn borrow(&self) -> LockResult<Guard> {
let ret = Guard { panicking: thread::panicking() };
if self.get() {
Err(PoisonError::new(ret))
} else {
Ok(ret)
}
}
#[inline]
pub fn done(&self, guard: &Guard) {
if !guard.panicking && thread::panicking() {
self.failed.store(true, Ordering::Relaxed);
}
}
#[inline]
pub fn get(&self) -> bool {
self.failed.load(Ordering::Relaxed)
}
}
pub struct Guard {
panicking: bool,
}
/// A type of error which can be returned whenever a lock is acquired.
///
/// Both Mutexes and RwLocks are poisoned whenever a thread fails while the lock
/// is held. The precise semantics for when a lock is poisoned are documented on
/// each lock, but once a lock is poisoned, all future acquisitions will
/// return this error.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct PoisonError<T> {
guard: T,
}
/// An enumeration of possible errors which can occur while calling the
/// `try_lock` method.
#[stable(feature = "rust1", since = "1.0.0")]
pub enum TryLockError<T> {
/// The lock could not be acquired because another thread failed while holding
/// the lock.
#[stable(feature = "rust1", since = "1.0.0")]
Poisoned(#[stable(feature = "rust1", since = "1.0.0")] PoisonError<T>),
/// The lock could not be acquired at this time because the operation would
/// otherwise block.
#[stable(feature = "rust1", since = "1.0.0")]
WouldBlock,
}
/// A type alias for the result of a lock method which can be poisoned.
///
/// The `Ok` variant of this result indicates that the primitive was not
/// poisoned, and the `Guard` is contained within. The `Err` variant indicates
/// that the primitive was poisoned. Note that the `Err` variant *also* carries
/// the associated guard, and it can be acquired through the `into_inner`
/// method.
#[stable(feature = "rust1", since = "1.0.0")]
pub type LockResult<Guard> = Result<Guard, PoisonError<Guard>>;
/// A type alias for the result of a nonblocking locking method.
///
/// For more information, see `LockResult`. A `TryLockResult` doesn't
/// necessarily hold the associated guard in the `Err` type as the lock may not
/// have been acquired for other reasons.
#[stable(feature = "rust1", since = "1.0.0")]
pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>;
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Debug for PoisonError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
"PoisonError { inner: .. }".fmt(f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Display for PoisonError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
"poisoned lock: another task failed inside".fmt(f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Error for PoisonError<T> {
fn description(&self) -> &str {
"poisoned lock: another task failed inside"
}
}
impl<T> PoisonError<T> {
/// Creates a `PoisonError`.
#[stable(feature = "sync_poison", since = "1.2.0")]
pub fn new(guard: T) -> PoisonError<T> {
PoisonError { guard: guard }
}
/// Consumes this error indicating that a lock is poisoned, returning the
/// underlying guard to allow access regardless.
#[stable(feature = "sync_poison", since = "1.2.0")]
pub fn into_inner(self) -> T { self.guard }
/// Reaches into this error indicating that a lock is poisoned, returning a
/// reference to the underlying guard to allow access regardless.
#[stable(feature = "sync_poison", since = "1.2.0")]
pub fn get_ref(&self) -> &T { &self.guard }
/// Reaches into this error indicating that a lock is poisoned, returning a
/// mutable reference to the underlying guard to allow access regardless.
#[stable(feature = "sync_poison", since = "1.2.0")]
pub fn get_mut(&mut self) -> &mut T { &mut self.guard }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> From<PoisonError<T>> for TryLockError<T> {
fn from(err: PoisonError<T>) -> TryLockError<T> {
TryLockError::Poisoned(err)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Debug for TryLockError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
TryLockError::Poisoned(..) => "Poisoned(..)".fmt(f),
TryLockError::WouldBlock => "WouldBlock".fmt(f)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Display for TryLockError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
TryLockError::Poisoned(..) => "poisoned lock: another task failed inside",
TryLockError::WouldBlock => "try_lock failed because the operation would block"
}.fmt(f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Error for TryLockError<T> {
fn description(&self) -> &str {
match *self {
TryLockError::Poisoned(ref p) => p.description(),
TryLockError::WouldBlock => "try_lock failed because the operation would block"
}
}
fn cause(&self) -> Option<&Error> {
match *self {
TryLockError::Poisoned(ref p) => Some(p),
_ => None
}
}
}
pub fn map_result<T, U, F>(result: LockResult<T>, f: F)
-> LockResult<U>
where F: FnOnce(T) -> U {
match result {
Ok(t) => Ok(f(t)),
Err(PoisonError { guard }) => Err(PoisonError::new(f(guard)))
}
}
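// A minimal usage sketch added for illustration (not part of the original
// module): it shows how callers typically recover data after poisoning, using
// only the stable `std::sync` surface built on the types above
// (`Mutex::lock` returns a `LockResult`, and `PoisonError::into_inner` hands
// back the guard regardless of poisoning).
#[cfg(test)]
mod poison_usage_sketch {
    use sync::{Arc, Mutex};
    use thread;

    #[test]
    fn recover_after_poison() {
        let data = Arc::new(Mutex::new(0));
        let d2 = data.clone();
        // Poison the mutex by panicking while the guard is held.
        let _ = thread::spawn(move || {
            let mut guard = d2.lock().unwrap();
            *guard = 1;
            panic!("poison the lock");
        }).join();
        // `lock()` now returns `Err(PoisonError<_>)`; `into_inner` still
        // yields the guard, so the data stays reachable.
        let guard = data.lock().unwrap_or_else(|err| err.into_inner());
        assert_eq!(*guard, 1);
    }
}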

View file

@ -1,236 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use fmt;
use marker;
use ops::Deref;
use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
use sys::mutex as sys;
/// A re-entrant mutual exclusion lock.
///
/// This mutex will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
pub struct ReentrantMutex<T> {
inner: Box<sys::ReentrantMutex>,
poison: poison::Flag,
data: T,
}
unsafe impl<T: Send> Send for ReentrantMutex<T> {}
unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// Deref implementation.
///
/// # Mutability
///
/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
/// because implementing the trait would violate Rust's reference aliasing
/// rules. Use interior mutability (usually `RefCell`) in order to mutate the
/// guarded data.
#[must_use]
pub struct ReentrantMutexGuard<'a, T: 'a> {
// funny underscores due to how Deref currently works (it disregards field
// privacy).
__lock: &'a ReentrantMutex<T>,
__poison: poison::Guard,
}
impl<'a, T> !marker::Send for ReentrantMutexGuard<'a, T> {}
impl<T> ReentrantMutex<T> {
/// Creates a new reentrant mutex in an unlocked state.
pub fn new(t: T) -> ReentrantMutex<T> {
unsafe {
let mut mutex = ReentrantMutex {
inner: box sys::ReentrantMutex::uninitialized(),
poison: poison::Flag::new(),
data: t,
};
mutex.inner.init();
mutex
}
}
/// Acquires a mutex, blocking the current thread until it is able to do so.
///
/// This function will block the caller until the mutex becomes available.
/// Upon returning, the calling thread is the only thread holding the mutex.
/// If the calling thread already holds the lock, the call succeeds without
/// blocking.
///
/// # Errors
///
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
pub fn lock(&self) -> LockResult<ReentrantMutexGuard<T>> {
unsafe { self.inner.lock() }
ReentrantMutexGuard::new(&self)
}
/// Attempts to acquire this lock.
///
/// If the lock could not be acquired at this time, then `Err` is returned.
/// Otherwise, an RAII guard is returned.
///
/// This function does not block.
///
/// # Errors
///
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
pub fn try_lock(&self) -> TryLockResult<ReentrantMutexGuard<T>> {
if unsafe { self.inner.try_lock() } {
Ok(ReentrantMutexGuard::new(&self)?)
} else {
Err(TryLockError::WouldBlock)
}
}
}
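// Editor-added sketch (not part of the original file): the guard only
// implements `Deref`, so mutation goes through interior mutability as the
// `Mutability` note above describes. Only this module's API plus `RefCell`
// is assumed.
#[allow(dead_code)]
fn reentrant_refcell_sketch() {
    use cell::RefCell;

    let m = ReentrantMutex::new(RefCell::new(0));
    let outer = m.lock().unwrap();
    {
        // Re-locking on the thread that already holds the mutex succeeds
        // immediately instead of deadlocking.
        let inner = m.lock().unwrap();
        *inner.borrow_mut() += 1;
    }
    assert_eq!(*outer.borrow(), 1);
}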
impl<T> Drop for ReentrantMutex<T> {
fn drop(&mut self) {
// This is actually safe because we know that there is no further usage of
// this mutex (it's up to the user to arrange for the mutex to be dropped;
// that's not our job)
unsafe { self.inner.destroy() }
}
}
impl<T: fmt::Debug + 'static> fmt::Debug for ReentrantMutex<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Ok(guard) => write!(f, "ReentrantMutex {{ data: {:?} }}", &*guard),
Err(TryLockError::Poisoned(err)) => {
write!(f, "ReentrantMutex {{ data: Poisoned({:?}) }}", &**err.get_ref())
},
Err(TryLockError::WouldBlock) => write!(f, "ReentrantMutex {{ <locked> }}")
}
}
}
impl<'mutex, T> ReentrantMutexGuard<'mutex, T> {
fn new(lock: &'mutex ReentrantMutex<T>)
-> LockResult<ReentrantMutexGuard<'mutex, T>> {
poison::map_result(lock.poison.borrow(), |guard| {
ReentrantMutexGuard {
__lock: lock,
__poison: guard,
}
})
}
}
impl<'mutex, T> Deref for ReentrantMutexGuard<'mutex, T> {
type Target = T;
fn deref(&self) -> &T {
&self.__lock.data
}
}
impl<'a, T> Drop for ReentrantMutexGuard<'a, T> {
#[inline]
fn drop(&mut self) {
unsafe {
self.__lock.poison.done(&self.__poison);
self.__lock.inner.unlock();
}
}
}
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
use cell::RefCell;
use sync::Arc;
use thread;
#[test]
fn smoke() {
let m = ReentrantMutex::new(());
{
let a = m.lock().unwrap();
{
let b = m.lock().unwrap();
{
let c = m.lock().unwrap();
assert_eq!(*c, ());
}
assert_eq!(*b, ());
}
assert_eq!(*a, ());
}
}
#[test]
fn is_mutex() {
let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
let m2 = m.clone();
let lock = m.lock().unwrap();
let child = thread::spawn(move || {
let lock = m2.lock().unwrap();
assert_eq!(*lock.borrow(), 4950);
});
for i in 0..100 {
let lock = m.lock().unwrap();
*lock.borrow_mut() += i;
}
drop(lock);
child.join().unwrap();
}
#[test]
fn trylock_works() {
let m = Arc::new(ReentrantMutex::new(()));
let m2 = m.clone();
let _lock = m.try_lock().unwrap();
let _lock2 = m.try_lock().unwrap();
thread::spawn(move || {
let lock = m2.try_lock();
assert!(lock.is_err());
}).join().unwrap();
let _lock3 = m.try_lock().unwrap();
}
pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
impl<'a> Drop for Answer<'a> {
fn drop(&mut self) {
*self.0.borrow_mut() = 42;
}
}
#[test]
fn poison_works() {
let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
let mc = m.clone();
let result = thread::spawn(move ||{
let lock = mc.lock().unwrap();
*lock.borrow_mut() = 1;
let lock2 = mc.lock().unwrap();
*lock.borrow_mut() = 2;
let _answer = Answer(lock2);
panic!("What the answer to my lifetimes dilemma is?");
}).join();
assert!(result.is_err());
let r = m.lock().err().unwrap().into_inner();
assert_eq!(*r.borrow(), 42);
}
}

View file

@ -1,82 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use sys::rwlock as imp;
/// An OS-based reader-writer lock.
///
/// This structure is entirely unsafe and serves as the lowest layer of a
/// cross-platform binding of system rwlocks. It is recommended to use the
/// safer types at the top level of this crate instead of this type.
pub struct RWLock(imp::RWLock);
impl RWLock {
/// Creates a new reader-writer lock for use.
///
/// Behavior is undefined if the reader-writer lock is moved after it is
/// first used with any of the functions below.
pub const fn new() -> RWLock { RWLock(imp::RWLock::new()) }
/// Acquires shared access to the underlying lock, blocking the current
/// thread to do so.
///
/// Behavior is undefined if the rwlock has been moved between this and any
/// previous method call.
#[inline]
pub unsafe fn read(&self) { self.0.read() }
/// Attempts to acquire shared access to this lock, returning whether it
/// succeeded or not.
///
/// This function does not block the current thread.
///
/// Behavior is undefined if the rwlock has been moved between this and any
/// previous method call.
#[inline]
pub unsafe fn try_read(&self) -> bool { self.0.try_read() }
/// Acquires write access to the underlying lock, blocking the current thread
/// to do so.
///
/// Behavior is undefined if the rwlock has been moved between this and any
/// previous method call.
#[inline]
pub unsafe fn write(&self) { self.0.write() }
/// Attempts to acquire exclusive access to this lock, returning whether it
/// succeeded or not.
///
/// This function does not block the current thread.
///
/// Behavior is undefined if the rwlock has been moved between this and any
/// previous method call.
#[inline]
pub unsafe fn try_write(&self) -> bool { self.0.try_write() }
/// Unlocks previously acquired shared access to this lock.
///
/// Behavior is undefined if the current thread does not have shared access.
#[inline]
pub unsafe fn read_unlock(&self) { self.0.read_unlock() }
/// Unlocks previously acquired exclusive access to this lock.
///
/// Behavior is undefined if the current thread does not currently have
/// exclusive access.
#[inline]
pub unsafe fn write_unlock(&self) { self.0.write_unlock() }
/// Destroys OS-related resources with this RWLock.
///
/// Behavior is undefined if there are any currently active users of this
/// lock.
#[inline]
pub unsafe fn destroy(&self) { self.0.destroy() }
}
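// Editor-added sketch (not part of the original file): every acquisition on
// this low-level type must be manually paired with the matching unlock, and
// all calls are `unsafe`; safe code should prefer the safer types at the top
// level of the crate, such as `std::sync::RwLock`. Only the API above is
// assumed.
#[allow(dead_code)]
fn rwlock_usage_sketch() {
    static LOCK: RWLock = RWLock::new();
    unsafe {
        LOCK.read();            // shared access
        // ... read the protected data ...
        LOCK.read_unlock();     // must be balanced by the acquiring thread

        if LOCK.try_write() {   // exclusive access without blocking
            // ... mutate the protected data ...
            LOCK.write_unlock();
        }
    }
}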

View file

@ -1,22 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use alloc::boxed::FnBox;
use libc;
use sys::stack_overflow;
pub unsafe fn start_thread(main: *mut libc::c_void) {
// Next, set up our stack overflow handler which may get triggered if we run
// out of stack.
let _handler = stack_overflow::Handler::new();
// Finally, let's run some code.
Box::from_raw(main as *mut Box<FnBox()>)()
}

View file

@ -1,61 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)] // stack_guard isn't used right now on all platforms
use cell::RefCell;
use thread::Thread;
use thread::LocalKeyState;
struct ThreadInfo {
stack_guard: Option<usize>,
thread: Thread,
}
thread_local! { static THREAD_INFO: RefCell<Option<ThreadInfo>> = RefCell::new(None) }
impl ThreadInfo {
fn with<R, F>(f: F) -> Option<R> where F: FnOnce(&mut ThreadInfo) -> R {
if THREAD_INFO.state() == LocalKeyState::Destroyed {
return None
}
THREAD_INFO.with(move |c| {
if c.borrow().is_none() {
*c.borrow_mut() = Some(ThreadInfo {
stack_guard: None,
thread: NewThread::new(None),
})
}
Some(f(c.borrow_mut().as_mut().unwrap()))
})
}
}
pub fn current_thread() -> Option<Thread> {
ThreadInfo::with(|info| info.thread.clone())
}
pub fn stack_guard() -> Option<usize> {
ThreadInfo::with(|info| info.stack_guard).and_then(|o| o)
}
pub fn set(stack_guard: Option<usize>, thread: Thread) {
THREAD_INFO.with(|c| assert!(c.borrow().is_none()));
THREAD_INFO.with(move |c| *c.borrow_mut() = Some(ThreadInfo{
stack_guard: stack_guard,
thread: thread,
}));
}
// a hack to get around privacy restrictions; implemented by `std::thread`
pub trait NewThread {
fn new(name: Option<String>) -> Self;
}
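// Editor-added sketch (not part of the original file): typical read-side use
// of this module from elsewhere in the runtime. `current_thread` returns
// `None` only once this thread's TLS slot has been destroyed during teardown,
// and `stack_guard` returns `None` until `set` has recorded a guard page.
#[allow(dead_code)]
fn thread_info_sketch() {
    if let Some(t) = current_thread() {
        let _has_name = t.name().is_some();
    }
    let _guard_page: Option<usize> = stack_guard();
}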

View file

@ -1,270 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! OS-based thread local storage
//!
//! This module provides an implementation of OS-based thread local storage,
//! using the native OS-provided facilities (think `TlsAlloc` or
//! `pthread_setspecific`). The interface of this differs from the other types
//! of thread-local storage provided in this crate in that OS-based TLS can only
//! get/set pointers.
//!
//! This module also provides two flavors of TLS. One is intended for static
//! initialization, and does not contain a `Drop` implementation to deallocate
//! the OS-TLS key. The other is a type which does implement `Drop` and hence
//! has a safe interface.
//!
//! # Usage
//!
//! This module should likely not be used directly unless other primitives are
//! being built on top of it. Types such as `thread_local::spawn::Key` are
//! likely much more useful in practice than this OS-based version, which
//! typically requires unsafe code to interoperate with.
//!
//! # Examples
//!
//! Using a dynamically allocated TLS key. Note that this key can be shared
//! among many threads via an `Arc`.
//!
//! ```rust,ignore
//! let key = Key::new(None);
//! assert!(key.get().is_null());
//! key.set(1 as *mut u8);
//! assert!(!key.get().is_null());
//!
//! drop(key); // deallocate this TLS slot.
//! ```
//!
//! Sometimes a statically allocated key is either required or easier to work
//! with, however.
//!
//! ```rust,ignore
//! static KEY: StaticKey = INIT;
//!
//! unsafe {
//! assert!(KEY.get().is_null());
//! KEY.set(1 as *mut u8);
//! }
//! ```
#![allow(non_camel_case_types)]
#![unstable(feature = "thread_local_internals", issue = "0")]
#![allow(dead_code)] // sys isn't exported yet
use sync::atomic::{self, AtomicUsize, Ordering};
use sys::thread_local as imp;
/// A type for TLS keys that are statically allocated.
///
/// This type is entirely `unsafe` to use as it does not protect against
/// use-after-deallocation or use-during-deallocation.
///
/// The actual OS-TLS key is lazily allocated when this is used for the first
/// time. The key is also deallocated when the Rust runtime exits or `destroy`
/// is called, whichever comes first.
///
/// # Examples
///
/// ```ignore
/// use tls::os::{StaticKey, INIT};
///
/// static KEY: StaticKey = INIT;
///
/// unsafe {
/// assert!(KEY.get().is_null());
/// KEY.set(1 as *mut u8);
/// }
/// ```
pub struct StaticKey {
/// Inner static TLS key (internals).
key: AtomicUsize,
/// Destructor for the TLS value.
///
/// See `Key::new` for information about when the destructor runs and how
/// it runs.
dtor: Option<unsafe extern fn(*mut u8)>,
}
/// A type for a safely managed OS-based TLS slot.
///
/// This type allocates an OS TLS key when it is initialized and will deallocate
/// the key when it falls out of scope. When compared with `StaticKey`, this
/// type is entirely safe to use.
///
/// Implementations will likely, however, contain unsafe code as this type only
/// operates on `*mut u8`, a raw pointer.
///
/// # Examples
///
/// ```rust,ignore
/// use tls::os::Key;
///
/// let key = Key::new(None);
/// assert!(key.get().is_null());
/// key.set(1 as *mut u8);
/// assert!(!key.get().is_null());
///
/// drop(key); // deallocate this TLS slot.
/// ```
pub struct Key {
key: imp::Key,
}
/// Constant initialization value for static TLS keys.
///
/// This value specifies no destructor by default.
pub const INIT: StaticKey = StaticKey::new(None);
impl StaticKey {
pub const fn new(dtor: Option<unsafe extern fn(*mut u8)>) -> StaticKey {
StaticKey {
key: atomic::AtomicUsize::new(0),
dtor: dtor
}
}
/// Gets the value associated with this TLS key
///
/// This will lazily allocate a TLS key from the OS if one has not already
/// been allocated.
#[inline]
pub unsafe fn get(&self) -> *mut u8 { imp::get(self.key()) }
/// Sets this TLS key to a new value.
///
/// This will lazily allocate a TLS key from the OS if one has not already
/// been allocated.
#[inline]
pub unsafe fn set(&self, val: *mut u8) { imp::set(self.key(), val) }
/// Deallocates this OS TLS key.
///
/// This function is unsafe as there is no guarantee that the key is not
/// currently in use by other threads or will not ever be used again.
///
/// Note that this does *not* run the user-provided destructor if one was
/// specified at definition time. Doing so must be done manually.
pub unsafe fn destroy(&self) {
match self.key.swap(0, Ordering::SeqCst) {
0 => {}
n => { imp::destroy(n as imp::Key) }
}
}
#[inline]
unsafe fn key(&self) -> imp::Key {
match self.key.load(Ordering::Relaxed) {
0 => self.lazy_init() as imp::Key,
n => n as imp::Key
}
}
unsafe fn lazy_init(&self) -> usize {
// POSIX allows the key created here to be 0, but the compare_and_swap
// below relies on using 0 as a sentinel value to check who won the
// race to set the shared TLS key. As far as I know, there is no
// guaranteed value that cannot be returned as a pthread_key_create key,
// so there is no value we can initialize the inner key with to
// prove that it has not yet been set. As such, we'll continue using a
// value of 0, but with some gyrations to make sure we have a non-0
// value returned from the creation routine.
// FIXME: this is clearly a hack, and should be cleaned up.
let key1 = imp::create(self.dtor);
let key = if key1 != 0 {
key1
} else {
let key2 = imp::create(self.dtor);
imp::destroy(key1);
key2
};
assert!(key != 0);
match self.key.compare_and_swap(0, key as usize, Ordering::SeqCst) {
// The CAS succeeded, so we've created the actual key
0 => key as usize,
// If someone beat us to the punch, use their key instead
n => { imp::destroy(key); n }
}
}
}
impl Key {
/// Creates a new managed OS TLS key.
///
/// This key will be deallocated when the key falls out of scope.
///
/// The argument provided is an optionally-specified destructor for the
/// value of this TLS key. When a thread exits and the value for this key
/// is non-null the destructor will be invoked. The TLS value will be reset
/// to null before the destructor is invoked.
///
/// Note that the destructor will not be run when the `Key` goes out of
/// scope.
#[inline]
pub fn new(dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
Key { key: unsafe { imp::create(dtor) } }
}
/// See StaticKey::get
#[inline]
pub fn get(&self) -> *mut u8 {
unsafe { imp::get(self.key) }
}
/// See StaticKey::set
#[inline]
pub fn set(&self, val: *mut u8) {
unsafe { imp::set(self.key, val) }
}
}
impl Drop for Key {
fn drop(&mut self) {
unsafe { imp::destroy(self.key) }
}
}
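// Editor-added sketch (not part of the original file): a managed `Key` with a
// destructor. Per the docs on `Key::new`, the destructor runs at thread exit
// for a non-null value (after the slot is reset to null) and does not run
// when the `Key` itself is dropped. Only the API above is assumed.
#[allow(dead_code)]
unsafe extern fn example_dtor(ptr: *mut u8) {
    // Invoked once per exiting thread with the last non-null value stored.
    let _ = ptr;
}

#[allow(dead_code)]
fn key_dtor_sketch() {
    let key = Key::new(Some(example_dtor));
    assert!(key.get().is_null());
    key.set(1 as *mut u8); // any non-null value works
    assert_eq!(key.get() as usize, 1);
    // Dropping `key` frees the OS slot but does not invoke `example_dtor`.
}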
#[cfg(test)]
mod tests {
use super::{Key, StaticKey};
fn assert_sync<T: Sync>() {}
fn assert_send<T: Send>() {}
#[test]
fn smoke() {
assert_sync::<Key>();
assert_send::<Key>();
let k1 = Key::new(None);
let k2 = Key::new(None);
assert!(k1.get().is_null());
assert!(k2.get().is_null());
k1.set(1 as *mut _);
k2.set(2 as *mut _);
assert_eq!(k1.get() as usize, 1);
assert_eq!(k2.get() as usize, 2);
}
#[test]
fn statik() {
static K1: StaticKey = StaticKey::new(None);
static K2: StaticKey = StaticKey::new(None);
unsafe {
assert!(K1.get().is_null());
assert!(K2.get().is_null());
K1.set(1 as *mut _);
K2.set(2 as *mut _);
assert_eq!(K1.get() as usize, 1);
assert_eq!(K2.get() as usize, 2);
}
}
}

View file

@ -1,82 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use env;
use fmt;
use io::prelude::*;
use sync::atomic::{self, Ordering};
use sys::stdio::Stderr;
use thread;
pub fn min_stack() -> usize {
static MIN: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
match MIN.load(Ordering::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = env::var("RUST_MIN_STACK").ok().and_then(|s| s.parse().ok());
let amt = amt.unwrap_or(2 * 1024 * 1024);
// 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
MIN.store(amt + 1, Ordering::SeqCst);
amt
}
pub fn dumb_print(args: fmt::Arguments) {
let _ = Stderr::new().map(|mut stderr| stderr.write_fmt(args));
}
// On Redox, use an illegal instruction
#[cfg(redox)]
unsafe fn abort_internal() -> ! {
::intrinsics::abort()
}
// On Unix-like platforms, libc::abort will unregister signal handlers
// including the SIGABRT handler, preventing the abort from being blocked, and
// fclose streams, with the side effect of flushing them so libc buffered
// output will be printed. Additionally the shell will generally print a more
// understandable error message like "Abort trap" rather than "Illegal
// instruction" that intrinsics::abort would cause, as intrinsics::abort is
// implemented as an illegal instruction.
#[cfg(unix)]
unsafe fn abort_internal() -> ! {
::libc::abort()
}
// On Windows, use the processor-specific __fastfail mechanism. In Windows 8
// and later, this will terminate the process immediately without running any
// in-process exception handlers. In earlier versions of Windows, this
// sequence of instructions will be treated as an access violation,
// terminating the process but without necessarily bypassing all exception
// handlers.
//
// https://msdn.microsoft.com/en-us/library/dn774154.aspx
#[cfg(all(windows, any(target_arch = "x86", target_arch = "x86_64")))]
unsafe fn abort_internal() -> ! {
asm!("int $$0x29" :: "{ecx}"(7) ::: volatile); // 7 is FAST_FAIL_FATAL_APP_EXIT
::intrinsics::unreachable();
}
// Other platforms should use the appropriate platform-specific mechanism for
// aborting the process. If no platform-specific mechanism is available,
// ::intrinsics::abort() may be used instead. The above implementations cover
// all targets currently supported by libstd.
pub fn abort(args: fmt::Arguments) -> ! {
dumb_print(format_args!("fatal runtime error: {}\n", args));
unsafe { abort_internal(); }
}
#[allow(dead_code)] // stack overflow detection not enabled on all platforms
pub unsafe fn report_overflow() {
dumb_print(format_args!("\nthread '{}' has overflowed its stack\n",
thread::current().name().unwrap_or("<unknown>")));
}

File diff suppressed because it is too large

src/libstd/sys/mod.rs (new file, 45 lines added)
View file

@ -0,0 +1,45 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Platform-dependent platform abstraction
//!
//! The `std::sys` module is the abstracted interface through which
//! `std` talks to the underlying operating system. It has different
//! implementations for different operating system families, today
//! just Unix and Windows.
//!
//! The centralization of platform-specific code in this module is
//! enforced by the "platform abstraction layer" tidy script in
//! `tools/tidy/pal.rs`.
//!
//! This module is closely related to the platform-independent system
//! integration code in `std::sys_common`. See that module's
//! documentation for details.
//!
//! In the future it would be desirable for the independent
//! implementations of this module to be extracted to their own crates
//! that `std` can link to, thus enabling their implementation
//! out-of-tree via crate replacement. However, due to the complex
//! inter-dependencies within `std`, that will be a challenging goal to
//! achieve.
pub use self::imp::*;
#[cfg(redox)]
#[path = "redox/mod.rs"]
mod imp;
#[cfg(unix)]
#[path = "unix/mod.rs"]
mod imp;
#[cfg(windows)]
#[path = "windows/mod.rs"]
mod imp;

View file

@ -0,0 +1,167 @@
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![cfg(target_thread_local)]
#![unstable(feature = "thread_local_internals", issue = "0")]
use cell::{Cell, UnsafeCell};
use intrinsics;
use ptr;
pub struct Key<T> {
inner: UnsafeCell<Option<T>>,
// Metadata to keep track of the state of the destructor. Remember that
// these variables are thread-local, not global.
dtor_registered: Cell<bool>,
dtor_running: Cell<bool>,
}
unsafe impl<T> ::marker::Sync for Key<T> { }
impl<T> Key<T> {
pub const fn new() -> Key<T> {
Key {
inner: UnsafeCell::new(None),
dtor_registered: Cell::new(false),
dtor_running: Cell::new(false)
}
}
pub fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
unsafe {
if intrinsics::needs_drop::<T>() && self.dtor_running.get() {
return None
}
self.register_dtor();
}
Some(&self.inner)
}
unsafe fn register_dtor(&self) {
if !intrinsics::needs_drop::<T>() || self.dtor_registered.get() {
return
}
register_dtor(self as *const _ as *mut u8,
destroy_value::<T>);
self.dtor_registered.set(true);
}
}
#[cfg(any(target_os = "linux", target_os = "fuchsia"))]
unsafe fn register_dtor_fallback(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
// The fallback implementation uses a vanilla OS-based TLS key to track
// the list of destructors that need to be run for this thread. The key
// then has its own destructor which runs all the other destructors.
//
// The destructor for DTORS is a little special in that it has a `while`
// loop to continuously drain the list of registered destructors. It
// *should* be the case that this loop always terminates because we
// provide the guarantee that a TLS key cannot be set after it is
// flagged for destruction.
use sys_common::thread_local as os;
static DTORS: os::StaticKey = os::StaticKey::new(Some(run_dtors));
type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>;
if DTORS.get().is_null() {
let v: Box<List> = box Vec::new();
DTORS.set(Box::into_raw(v) as *mut u8);
}
let list: &mut List = &mut *(DTORS.get() as *mut List);
list.push((t, dtor));
unsafe extern fn run_dtors(mut ptr: *mut u8) {
while !ptr.is_null() {
let list: Box<List> = Box::from_raw(ptr as *mut List);
for &(ptr, dtor) in list.iter() {
dtor(ptr);
}
ptr = DTORS.get();
DTORS.set(ptr::null_mut());
}
}
}
// Since what appears to be glibc 2.18, this symbol has been shipped, and both
// GCC and clang use it to invoke destructors in thread_local globals, so
// let's do the same!
//
// Note, however, that we run on lots of older Linuxes, and we also cross
// compile from newer Linuxes to older ones, so we keep a fallback
// implementation to use as well.
//
// Due to rust-lang/rust#18804, make sure this is not generic!
#[cfg(target_os = "linux")]
unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
use mem;
use libc;
extern {
#[linkage = "extern_weak"]
static __dso_handle: *mut u8;
#[linkage = "extern_weak"]
static __cxa_thread_atexit_impl: *const libc::c_void;
}
if !__cxa_thread_atexit_impl.is_null() {
type F = unsafe extern fn(dtor: unsafe extern fn(*mut u8),
arg: *mut u8,
dso_handle: *mut u8) -> libc::c_int;
mem::transmute::<*const libc::c_void, F>(__cxa_thread_atexit_impl)
(dtor, t, &__dso_handle as *const _ as *mut _);
return
}
register_dtor_fallback(t, dtor);
}
// OSX's analog of the above linux function is this _tlv_atexit function.
// The disassembly of thread_local globals in C++ (at least produced by
// clang) will have this show up in the output.
#[cfg(target_os = "macos")]
unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
extern {
fn _tlv_atexit(dtor: unsafe extern fn(*mut u8),
arg: *mut u8);
}
_tlv_atexit(dtor, t);
}
// Just use the thread_local fallback implementation, at least until there's
// a more direct implementation.
#[cfg(target_os = "fuchsia")]
unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
register_dtor_fallback(t, dtor);
}
pub unsafe extern fn destroy_value<T>(ptr: *mut u8) {
let ptr = ptr as *mut Key<T>;
// Right before we run the user destructor be sure to flag the
// destructor as running for this thread so calls to `get` will return
// `None`.
(*ptr).dtor_running.set(true);
// The OSX implementation of TLS apparently had an odd aspect to it
// where the pointer we have may be overwritten while this destructor
// is running. Specifically if a TLS destructor re-accesses TLS it may
// trigger a re-initialization of all TLS variables, paving over at
// least some destroyed ones with initial values.
//
// This means that if we drop a TLS value in place on OSX that we could
// revert the value to its original state halfway through the
// destructor, which would be bad!
//
// Hence, we use `ptr::read` on OSX (to move to a "safe" location)
// instead of drop_in_place.
if cfg!(target_os = "macos") {
ptr::read((*ptr).inner.get());
} else {
ptr::drop_in_place((*ptr).inner.get());
}
}

View file

@ -38,6 +38,7 @@ pub mod backtrace;
pub mod condvar;
pub mod env;
pub mod ext;
pub mod fast_thread_local;
pub mod fd;
pub mod fs;
pub mod memchr;
@ -162,3 +163,14 @@ pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
}
}
}
// On Unix-like platforms, libc::abort will unregister signal handlers
// including the SIGABRT handler, preventing the abort from being blocked, and
// fclose streams, with the side effect of flushing them so libc buffered
// output will be printed. Additionally the shell will generally print a more
// understandable error message like "Abort trap" rather than "Illegal
// instruction" that intrinsics::abort would cause, as intrinsics::abort is
// implemented as an illegal instruction.
pub unsafe fn abort_internal() -> ! {
::libc::abort()
}

View file

@ -67,3 +67,4 @@ impl io::Write for Stderr {
}
pub const EBADF_ERR: i32 = ::libc::EBADF as i32;
pub const STDIN_BUF_SIZE: usize = ::sys_common::io::DEFAULT_BUF_SIZE;

View file

@ -179,7 +179,7 @@ pub fn truncate_utf16_at_nul<'a>(v: &'a [u16]) -> &'a [u16] {
}
}
trait IsZero {
pub trait IsZero {
fn is_zero(&self) -> bool;
}
@ -193,7 +193,7 @@ macro_rules! impl_is_zero {
impl_is_zero! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
fn cvt<I: IsZero>(i: I) -> io::Result<I> {
pub fn cvt<I: IsZero>(i: I) -> io::Result<I> {
if i.is_zero() {
Err(io::Error::last_os_error())
} else {
@ -201,7 +201,7 @@ fn cvt<I: IsZero>(i: I) -> io::Result<I> {
}
}
fn dur2timeout(dur: Duration) -> c::DWORD {
pub fn dur2timeout(dur: Duration) -> c::DWORD {
// Note that a duration is a (u64, u32) (seconds, nanoseconds) pair, and the
// timeouts in windows APIs are typically u32 milliseconds. To translate, we
// have two pieces to take care of:
@ -221,3 +221,17 @@ fn dur2timeout(dur: Duration) -> c::DWORD {
}
}).unwrap_or(c::INFINITE)
}
// On Windows, use the processor-specific __fastfail mechanism. In Windows 8
// and later, this will terminate the process immediately without running any
// in-process exception handlers. In earlier versions of Windows, this
// sequence of instructions will be treated as an access violation,
// terminating the process but without necessarily bypassing all exception
// handlers.
//
// https://msdn.microsoft.com/en-us/library/dn774154.aspx
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub unsafe fn abort_internal() -> ! {
asm!("int $$0x29" :: "{ecx}"(7) ::: volatile); // 7 is FAST_FAIL_FATAL_APP_EXIT
::intrinsics::unreachable();
}

View file

@ -207,3 +207,8 @@ fn invalid_encoding() -> io::Error {
}
pub const EBADF_ERR: i32 = ::sys::c::ERROR_INVALID_HANDLE as i32;
// The default buffer capacity is 64k, but apparently windows
// doesn't like 64k reads on stdin. See #13304 for details, but the
// idea is that on windows we use a slightly smaller buffer that's
// been seen to be acceptable.
pub const STDIN_BUF_SIZE: usize = 8 * 1024;