From 81eea9e4312253afb655c051d0bf0661744ab56e Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 21 Jul 2017 09:41:20 -0700 Subject: [PATCH 001/213] Thread through the original error when opening archives This updates the management of opening archives to thread through the original piece of error information from LLVM over to the end consumer, trans. --- src/librustc_llvm/archive_ro.rs | 6 +++--- src/librustc_trans/back/archive.rs | 7 +++---- src/librustc_trans/metadata.rs | 16 ++++++++-------- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/src/librustc_llvm/archive_ro.rs b/src/librustc_llvm/archive_ro.rs index b3f5f8e53605..0b24e55541b0 100644 --- a/src/librustc_llvm/archive_ro.rs +++ b/src/librustc_llvm/archive_ro.rs @@ -39,14 +39,14 @@ impl ArchiveRO { /// /// If this archive is used with a mutable method, then an error will be /// raised. - pub fn open(dst: &Path) -> Option { + pub fn open(dst: &Path) -> Result { return unsafe { let s = path2cstr(dst); let ar = ::LLVMRustOpenArchive(s.as_ptr()); if ar.is_null() { - None + Err(::last_error().unwrap_or("failed to open archive".to_string())) } else { - Some(ArchiveRO { ptr: ar }) + Ok(ArchiveRO { ptr: ar }) } }; diff --git a/src/librustc_trans/back/archive.rs b/src/librustc_trans/back/archive.rs index 902065c8688d..6ec40bd689c2 100644 --- a/src/librustc_trans/back/archive.rs +++ b/src/librustc_trans/back/archive.rs @@ -126,7 +126,7 @@ impl<'a> ArchiveBuilder<'a> { Some(ref src) => src, None => return None, }; - self.src_archive = Some(ArchiveRO::open(src)); + self.src_archive = Some(ArchiveRO::open(src).ok()); self.src_archive.as_ref().unwrap().as_ref() } @@ -186,9 +186,8 @@ impl<'a> ArchiveBuilder<'a> { where F: FnMut(&str) -> bool + 'static { let archive = match ArchiveRO::open(archive) { - Some(ar) => ar, - None => return Err(io::Error::new(io::ErrorKind::Other, - "failed to open archive")), + Ok(ar) => ar, + Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), }; 
self.additions.push(Addition::Archive { archive: archive, diff --git a/src/librustc_trans/metadata.rs b/src/librustc_trans/metadata.rs index 2c0148dfbb37..883808c59091 100644 --- a/src/librustc_trans/metadata.rs +++ b/src/librustc_trans/metadata.rs @@ -31,10 +31,10 @@ impl MetadataLoader for LlvmMetadataLoader { // just keeping the archive along while the metadata is in use. let archive = ArchiveRO::open(filename) .map(|ar| OwningRef::new(box ar)) - .ok_or_else(|| { - debug!("llvm didn't like `{}`", filename.display()); - format!("failed to read rlib metadata: '{}'", filename.display()) - })?; + .map_err(|e| { + debug!("llvm didn't like `{}`: {}", filename.display(), e); + format!("failed to read rlib metadata in '{}': {}", filename.display(), e) + })?; let buf: OwningRef<_, [u8]> = archive .try_map(|ar| { ar.iter() @@ -42,10 +42,10 @@ impl MetadataLoader for LlvmMetadataLoader { .find(|sect| sect.name() == Some(METADATA_FILENAME)) .map(|s| s.data()) .ok_or_else(|| { - debug!("didn't find '{}' in the archive", METADATA_FILENAME); - format!("failed to read rlib metadata: '{}'", - filename.display()) - }) + debug!("didn't find '{}' in the archive", METADATA_FILENAME); + format!("failed to read rlib metadata: '{}'", + filename.display()) + }) })?; Ok(buf.erase_owner()) } From 236b7487d525359440815a425cba97fa36903afc Mon Sep 17 00:00:00 2001 From: Tymoteusz Jankowski Date: Sun, 23 Jul 2017 14:18:34 +0200 Subject: [PATCH 002/213] Add simple docs example for struct Cell --- src/libcore/cell.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 35744f3f16b3..7e12d5466c2b 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -187,6 +187,29 @@ use ops::{Deref, DerefMut, CoerceUnsized}; use ptr; /// A mutable memory location. 
+/// +/// ``` +/// use std::cell::Cell; +/// +/// struct SomeStruct { +/// regular_field: u8, +/// special_field: Cell, +/// } +/// +/// let my_struct = SomeStruct { +/// regular_field: 0, +/// special_field: Cell::new(1), +/// }; +/// +/// let new_value = 100; +/// +/// // ERROR, because my_struct is immutable +/// // immutable.regular_field = new_value; +/// +/// // WORKS, special_field is mutable because it is Cell +/// immutable.special_field.set(new_value); +/// assert_eq!(immutable.special_field.get(), new_value); +/// ``` /// /// See the [module-level documentation](index.html) for more. #[stable(feature = "rust1", since = "1.0.0")] From bf7e91f61da8a5ca74c7d97f6f2bc978c0366256 Mon Sep 17 00:00:00 2001 From: "Zack M. Davis" Date: Sun, 23 Jul 2017 13:46:09 -0700 Subject: [PATCH 003/213] field does not exist error: note fields if Levenshtein suggestion fails When trying to access or initialize a nonexistent field, if we can't infer what field was meant (by virtue of the purported field in the source being a small Levenshtein distance away from an actual field, suggestive of a typo), issue a note listing all the available fields. To reduce terminal clutter, we don't issue the note when we have a `find_best_match_for_name` Levenshtein suggestion: the suggestion is probably right. The third argument of the call to `find_best_match_for_name` is changed to `None`, accepting the default maximum Levenshtein distance of one-third of the identifier supplied for correction. The previous value of `Some(name.len())` was overzealous, inappropriately very Levenshtein-distant suggestions when the attempted field access could not plausibly be a mere typo. For example, if a struct has fields `mule` and `phone`, but I type `.donkey`, I'd rather the error have a note listing that the available fields are, in fact, `mule` and `phone` (which is the behavior induced by this patch) rather than the error asking "did you mean `phone`?" (which is the behavior on master). 
The "only find fits with at least one matching letter" comment was accurate when it was first introduced in 09d992471 (January 2015), but is a vicious lie in its present context before a call to `find_best_match_for_name` and must be destroyed (replacing every letter is a Levenshtein distance of name.len()). The present author claims that this suffices to resolve #42599. --- src/librustc_typeck/check/mod.rs | 41 +++++++++++++++---- src/test/compile-fail/E0559.rs | 3 +- src/test/compile-fail/E0560.rs | 1 + src/test/compile-fail/issue-19922.rs | 3 +- src/test/compile-fail/numeric-fields.rs | 3 +- .../compile-fail/struct-fields-too-many.rs | 1 + .../compile-fail/suggest-private-fields.rs | 3 +- src/test/compile-fail/union/union-fields.rs | 1 + .../issue-36798_unknown_field.stderr | 2 + .../issue-42599_available_fields_note.rs | 39 ++++++++++++++++++ .../issue-42599_available_fields_note.stderr | 30 ++++++++++++++ 11 files changed, 114 insertions(+), 13 deletions(-) create mode 100644 src/test/ui/did_you_mean/issue-42599_available_fields_note.rs create mode 100644 src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index b086c427ba59..4b40a46f74ee 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -2956,6 +2956,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { format!("did you mean `{}`?", suggested_field_name)); } else { err.span_label(field.span, "unknown field"); + let struct_variant_def = def.struct_variant(); + let available_field_names = self.available_field_names( + struct_variant_def); + err.note(&format!("available fields are: {}", + available_field_names.join(", "))); }; } ty::TyRawPtr(..) 
=> { @@ -2979,7 +2984,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Return an hint about the closest match in field names fn suggest_field_name(variant: &'tcx ty::VariantDef, field: &Spanned, - skip : Vec) + skip: Vec) -> Option { let name = field.node.as_str(); let names = variant.fields.iter().filter_map(|field| { @@ -2992,8 +2997,18 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }); - // only find fits with at least one matching letter - find_best_match_for_name(names, &name, Some(name.len())) + find_best_match_for_name(names, &name, None) + } + + fn available_field_names(&self, variant: &'tcx ty::VariantDef) -> Vec { + let mut available = Vec::new(); + for field in variant.fields.iter() { + let (_, def_scope) = self.tcx.adjust(field.name, variant.did, self.body_id); + if field.vis.is_accessible_from(def_scope, self.tcx) { + available.push(field.name.to_string()); + } + } + available } // Check tuple index expressions @@ -3107,14 +3122,22 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { format!("field does not exist - did you mean `{}`?", field_name)); } else { match ty.sty { - ty::TyAdt(adt, ..) if adt.is_enum() => { - err.span_label(field.name.span, format!("`{}::{}` does not have this field", - ty, variant.name)); - } - _ => { - err.span_label(field.name.span, format!("`{}` does not have this field", ty)); + ty::TyAdt(adt, ..) 
=> { + if adt.is_enum() { + err.span_label(field.name.span, + format!("`{}::{}` does not have this field", + ty, variant.name)); + } else { + err.span_label(field.name.span, + format!("`{}` does not have this field", ty)); + } + let available_field_names = self.available_field_names(variant); + err.note(&format!("available fields are: {}", + available_field_names.join(", "))); } + _ => bug!("non-ADT passed to report_unknown_field") } + }; err.emit(); } diff --git a/src/test/compile-fail/E0559.rs b/src/test/compile-fail/E0559.rs index fa6c885843e4..21bb2dc7002c 100644 --- a/src/test/compile-fail/E0559.rs +++ b/src/test/compile-fail/E0559.rs @@ -15,5 +15,6 @@ enum Field { fn main() { let s = Field::Fool { joke: 0 }; //~^ ERROR E0559 - //~| NOTE field does not exist - did you mean `x`? + //~| NOTE `Field::Fool` does not have this field + //~| NOTE available fields are: x } diff --git a/src/test/compile-fail/E0560.rs b/src/test/compile-fail/E0560.rs index c6326a0f9774..7aa6b2e86d69 100644 --- a/src/test/compile-fail/E0560.rs +++ b/src/test/compile-fail/E0560.rs @@ -16,4 +16,5 @@ fn main() { let s = Simba { mother: 1, father: 0 }; //~^ ERROR E0560 //~| NOTE `Simba` does not have this field + //~| NOTE available fields are: mother } diff --git a/src/test/compile-fail/issue-19922.rs b/src/test/compile-fail/issue-19922.rs index d7b2f2b3f991..429c4384117a 100644 --- a/src/test/compile-fail/issue-19922.rs +++ b/src/test/compile-fail/issue-19922.rs @@ -15,5 +15,6 @@ enum Homura { fn main() { let homura = Homura::Akemi { kaname: () }; //~^ ERROR variant `Homura::Akemi` has no field named `kaname` - //~| NOTE field does not exist - did you mean `madoka`? 
+ //~| NOTE `Homura::Akemi` does not have this field + //~| NOTE available fields are: madoka } diff --git a/src/test/compile-fail/numeric-fields.rs b/src/test/compile-fail/numeric-fields.rs index 00fde3025a63..242c3a3a33d2 100644 --- a/src/test/compile-fail/numeric-fields.rs +++ b/src/test/compile-fail/numeric-fields.rs @@ -13,7 +13,8 @@ struct S(u8, u16); fn main() { let s = S{0b1: 10, 0: 11}; //~^ ERROR struct `S` has no field named `0b1` - //~| NOTE field does not exist - did you mean `1`? + //~| NOTE `S` does not have this field + //~| NOTE available fields are: 0, 1 match s { S{0: a, 0x1: b, ..} => {} //~^ ERROR does not have a field named `0x1` diff --git a/src/test/compile-fail/struct-fields-too-many.rs b/src/test/compile-fail/struct-fields-too-many.rs index 0848ada731a6..78ab94d5fb4d 100644 --- a/src/test/compile-fail/struct-fields-too-many.rs +++ b/src/test/compile-fail/struct-fields-too-many.rs @@ -18,5 +18,6 @@ fn main() { bar: 0 //~^ ERROR struct `BuildData` has no field named `bar` //~| NOTE `BuildData` does not have this field + //~| NOTE available fields are: foo }; } diff --git a/src/test/compile-fail/suggest-private-fields.rs b/src/test/compile-fail/suggest-private-fields.rs index 3672e0e90c2a..959932af9b1d 100644 --- a/src/test/compile-fail/suggest-private-fields.rs +++ b/src/test/compile-fail/suggest-private-fields.rs @@ -27,7 +27,8 @@ fn main () { //~| NOTE field does not exist - did you mean `a`? bb: 20, //~^ ERROR struct `xc::B` has no field named `bb` - //~| NOTE field does not exist - did you mean `a`? 
+ //~| NOTE `xc::B` does not have this field + //~| NOTE available fields are: a }; // local crate struct let l = A { diff --git a/src/test/compile-fail/union/union-fields.rs b/src/test/compile-fail/union/union-fields.rs index b5d582a5746f..2bcc2204e334 100644 --- a/src/test/compile-fail/union/union-fields.rs +++ b/src/test/compile-fail/union/union-fields.rs @@ -20,6 +20,7 @@ fn main() { let u = U { a: 0, b: 1, c: 2 }; //~ ERROR union expressions should have exactly one field //~^ ERROR union `U` has no field named `c` //~| NOTE `U` does not have this field + //~| NOTE available fields are: a, b let u = U { ..u }; //~ ERROR union expressions should have exactly one field //~^ ERROR functional record update syntax requires a struct diff --git a/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr b/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr index 82e3eab0836c..610466c894aa 100644 --- a/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr +++ b/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr @@ -3,6 +3,8 @@ error[E0609]: no field `zz` on type `Foo` | 17 | f.zz; | ^^ unknown field + | + = note: available fields are: bar error: aborting due to previous error diff --git a/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs b/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs new file mode 100644 index 000000000000..4b0cc7b96a76 --- /dev/null +++ b/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs @@ -0,0 +1,39 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +mod submodule { + + #[derive(Default)] + pub struct Demo { + pub favorite_integer: isize, + secret_integer: isize, + pub innocently_misspellable: () + } + + impl Demo { + fn new_with_secret_two() -> Self { + Self { secret_integer: 2, inocently_mispellable: () } + } + + fn new_with_secret_three() -> Self { + Self { secret_integer: 3, egregiously_nonexistent_field: () } + } + } + +} + +fn main() { + use submodule::Demo; + + let demo = Demo::default(); + let innocent_field_misaccess = demo.inocently_mispellable; + // note shouldn't suggest private `secret_integer` field + let egregious_field_misaccess = demo.egregiously_nonexistent_field; +} diff --git a/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr b/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr new file mode 100644 index 000000000000..17edac92fd9d --- /dev/null +++ b/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr @@ -0,0 +1,30 @@ +error[E0560]: struct `submodule::Demo` has no field named `inocently_mispellable` + --> $DIR/issue-42599_available_fields_note.rs:22:39 + | +22 | Self { secret_integer: 2, inocently_mispellable: () } + | ^^^^^^^^^^^^^^^^^^^^^^ field does not exist - did you mean `innocently_misspellable`? + +error[E0560]: struct `submodule::Demo` has no field named `egregiously_nonexistent_field` + --> $DIR/issue-42599_available_fields_note.rs:26:39 + | +26 | Self { secret_integer: 3, egregiously_nonexistent_field: () } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `submodule::Demo` does not have this field + | + = note: available fields are: favorite_integer, secret_integer, innocently_misspellable + +error[E0609]: no field `inocently_mispellable` on type `submodule::Demo` + --> $DIR/issue-42599_available_fields_note.rs:36:41 + | +36 | let innocent_field_misaccess = demo.inocently_mispellable; + | ^^^^^^^^^^^^^^^^^^^^^ did you mean `innocently_misspellable`? 
+ +error[E0609]: no field `egregiously_nonexistent_field` on type `submodule::Demo` + --> $DIR/issue-42599_available_fields_note.rs:38:42 + | +38 | let egregious_field_misaccess = demo.egregiously_nonexistent_field; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unknown field + | + = note: available fields are: favorite_integer, innocently_misspellable + +error: aborting due to 4 previous errors + From bb65d3256841e1a7d267a3177a9147fd83857727 Mon Sep 17 00:00:00 2001 From: Tymoteusz Jankowski Date: Mon, 24 Jul 2017 16:23:26 +0200 Subject: [PATCH 004/213] add prose --- src/libcore/cell.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 7e12d5466c2b..acff77004ee4 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -188,6 +188,11 @@ use ptr; /// A mutable memory location. /// +/// # Example +/// +/// Here you can see how using `Cell` allows to use muttable field inside +/// immutable struct (which is also called "interior mutability"). +/// /// ``` /// use std::cell::Cell; /// @@ -206,7 +211,7 @@ use ptr; /// // ERROR, because my_struct is immutable /// // immutable.regular_field = new_value; /// -/// // WORKS, special_field is mutable because it is Cell +/// // WORKS, although `my_struct` is immutable, field `special_field` is mutable because it is Cell /// immutable.special_field.set(new_value); /// assert_eq!(immutable.special_field.get(), new_value); /// ``` From 3c535952bc7df52b8b9becae26511fb6ccdab7b1 Mon Sep 17 00:00:00 2001 From: Tymoteusz Jankowski Date: Mon, 24 Jul 2017 18:01:50 +0200 Subject: [PATCH 005/213] review fixes --- src/libcore/cell.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index acff77004ee4..804d95f12c42 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -188,10 +188,10 @@ use ptr; /// A mutable memory location. 
/// -/// # Example +/// # Examples /// -/// Here you can see how using `Cell` allows to use muttable field inside -/// immutable struct (which is also called "interior mutability"). +/// Here you can see how using `Cell` allows to use mutable field inside +/// immutable struct (which is also called 'interior mutability'). /// /// ``` /// use std::cell::Cell; From 82860753463314e6a1b94f1f97d4d9c4effc0742 Mon Sep 17 00:00:00 2001 From: Tymoteusz Jankowski Date: Mon, 24 Jul 2017 18:07:51 +0200 Subject: [PATCH 006/213] ci fix? --- src/libcore/cell.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 804d95f12c42..1610e89a82d7 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -187,12 +187,9 @@ use ops::{Deref, DerefMut, CoerceUnsized}; use ptr; /// A mutable memory location. -/// /// # Examples -/// /// Here you can see how using `Cell` allows to use mutable field inside /// immutable struct (which is also called 'interior mutability'). -/// /// ``` /// use std::cell::Cell; /// @@ -207,10 +204,8 @@ use ptr; /// }; /// /// let new_value = 100; -/// /// // ERROR, because my_struct is immutable /// // immutable.regular_field = new_value; -/// /// // WORKS, although `my_struct` is immutable, field `special_field` is mutable because it is Cell /// immutable.special_field.set(new_value); /// assert_eq!(immutable.special_field.get(), new_value); From beb072a8938db93e694435e852510b79a0909fd3 Mon Sep 17 00:00:00 2001 From: Tymoteusz Jankowski Date: Mon, 24 Jul 2017 21:45:21 +0200 Subject: [PATCH 007/213] empty lines --- src/libcore/cell.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 1610e89a82d7..1e7c8dfce5b3 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -187,9 +187,12 @@ use ops::{Deref, DerefMut, CoerceUnsized}; use ptr; /// A mutable memory location. 
+/// /// # Examples +/// /// Here you can see how using `Cell` allows to use mutable field inside /// immutable struct (which is also called 'interior mutability'). +/// /// ``` /// use std::cell::Cell; /// @@ -204,8 +207,10 @@ use ptr; /// }; /// /// let new_value = 100; +/// /// // ERROR, because my_struct is immutable /// // immutable.regular_field = new_value; +/// /// // WORKS, although `my_struct` is immutable, field `special_field` is mutable because it is Cell /// immutable.special_field.set(new_value); /// assert_eq!(immutable.special_field.get(), new_value); From 42dae9d96d2de5e18192ec13fd00380dcaef1745 Mon Sep 17 00:00:00 2001 From: Tshepang Lekhonkhobe Date: Mon, 24 Jul 2017 23:24:42 +0200 Subject: [PATCH 008/213] doc: make into_iter example more concise --- src/libcore/iter/traits.rs | 33 ++++++++------------------------- 1 file changed, 8 insertions(+), 25 deletions(-) diff --git a/src/libcore/iter/traits.rs b/src/libcore/iter/traits.rs index 679cf3a9b23e..06ede4f3f764 100644 --- a/src/libcore/iter/traits.rs +++ b/src/libcore/iter/traits.rs @@ -147,22 +147,13 @@ pub trait FromIterator: Sized { /// /// ``` /// let v = vec![1, 2, 3]; -/// /// let mut iter = v.into_iter(); /// -/// let n = iter.next(); -/// assert_eq!(Some(1), n); -/// -/// let n = iter.next(); -/// assert_eq!(Some(2), n); -/// -/// let n = iter.next(); -/// assert_eq!(Some(3), n); -/// -/// let n = iter.next(); -/// assert_eq!(None, n); +/// assert_eq!(Some(1), iter.next()); +/// assert_eq!(Some(2), iter.next()); +/// assert_eq!(Some(3), iter.next()); +/// assert_eq!(None, iter.next()); /// ``` -/// /// Implementing `IntoIterator` for your type: /// /// ``` @@ -227,20 +218,12 @@ pub trait IntoIterator { /// /// ``` /// let v = vec![1, 2, 3]; - /// /// let mut iter = v.into_iter(); /// - /// let n = iter.next(); - /// assert_eq!(Some(1), n); - /// - /// let n = iter.next(); - /// assert_eq!(Some(2), n); - /// - /// let n = iter.next(); - /// assert_eq!(Some(3), n); - /// - /// let n = 
iter.next(); - /// assert_eq!(None, n); + /// assert_eq!(Some(1), iter.next()); + /// assert_eq!(Some(2), iter.next()); + /// assert_eq!(Some(3), iter.next()); + /// assert_eq!(None, iter.next()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn into_iter(self) -> Self::IntoIter; From d429a4eac81aea6070655cdfb5604187d94355a2 Mon Sep 17 00:00:00 2001 From: Tymoteusz Jankowski Date: Mon, 24 Jul 2017 23:43:34 +0200 Subject: [PATCH 009/213] s/immutable/my_struct --- src/libcore/cell.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 1e7c8dfce5b3..21b5557db99f 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -209,11 +209,11 @@ use ptr; /// let new_value = 100; /// /// // ERROR, because my_struct is immutable -/// // immutable.regular_field = new_value; +/// // my_struct.regular_field = new_value; /// /// // WORKS, although `my_struct` is immutable, field `special_field` is mutable because it is Cell -/// immutable.special_field.set(new_value); -/// assert_eq!(immutable.special_field.get(), new_value); +/// my_struct.special_field.set(new_value); +/// assert_eq!(my_struct.special_field.get(), new_value); /// ``` /// /// See the [module-level documentation](index.html) for more. From 85ef570e0031d5b8a5b1b3adadcd9a70962b585e Mon Sep 17 00:00:00 2001 From: Ian Douglas Scott Date: Mon, 24 Jul 2017 14:44:16 -0700 Subject: [PATCH 010/213] Implement AsRawFd for Stdin, Stdout, and Stderr --- src/libstd/sys/redox/ext/io.rs | 16 ++++++++++++++++ src/libstd/sys/unix/ext/io.rs | 17 +++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/src/libstd/sys/redox/ext/io.rs b/src/libstd/sys/redox/ext/io.rs index 8e7cc593dbde..9723b42354ed 100644 --- a/src/libstd/sys/redox/ext/io.rs +++ b/src/libstd/sys/redox/ext/io.rs @@ -15,6 +15,7 @@ use fs; use net; use sys; +use io; use sys_common::{self, AsInner, FromInner, IntoInner}; /// Raw file descriptors. 
@@ -109,6 +110,21 @@ impl AsRawFd for net::UdpSocket { } } +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for io::Stdin { + fn as_raw_fd(&self) -> RawFd { 0 } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for io::Stdout { + fn as_raw_fd(&self) -> RawFd { 1 } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for io::Stderr { + fn as_raw_fd(&self) -> RawFd { 2 } +} + #[stable(feature = "from_raw_os", since = "1.1.0")] impl FromRawFd for net::TcpStream { unsafe fn from_raw_fd(fd: RawFd) -> net::TcpStream { diff --git a/src/libstd/sys/unix/ext/io.rs b/src/libstd/sys/unix/ext/io.rs index 296235e173d1..6fb0ef9c4fbe 100644 --- a/src/libstd/sys/unix/ext/io.rs +++ b/src/libstd/sys/unix/ext/io.rs @@ -16,7 +16,9 @@ use fs; use net; use os::raw; use sys; +use io; use sys_common::{self, AsInner, FromInner, IntoInner}; +use libc; /// Raw file descriptors. #[stable(feature = "rust1", since = "1.0.0")] @@ -104,6 +106,21 @@ impl AsRawFd for net::UdpSocket { fn as_raw_fd(&self) -> RawFd { *self.as_inner().socket().as_inner() } } +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for io::Stdin { + fn as_raw_fd(&self) -> RawFd { libc::STDIN_FILENO } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for io::Stdout { + fn as_raw_fd(&self) -> RawFd { libc::STDOUT_FILENO } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for io::Stderr { + fn as_raw_fd(&self) -> RawFd { libc::STDERR_FILENO } +} + #[stable(feature = "from_raw_os", since = "1.1.0")] impl FromRawFd for net::TcpStream { unsafe fn from_raw_fd(fd: RawFd) -> net::TcpStream { From 66702b456cc4997f8118a8085c62481d94019bb7 Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Tue, 25 Jul 2017 18:46:14 +1200 Subject: [PATCH 011/213] save-analysis: dedup macro references --- src/librustc_save_analysis/dump_visitor.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/librustc_save_analysis/dump_visitor.rs 
b/src/librustc_save_analysis/dump_visitor.rs index ebdd99dc8025..ca27bd76fff7 100644 --- a/src/librustc_save_analysis/dump_visitor.rs +++ b/src/librustc_save_analysis/dump_visitor.rs @@ -30,6 +30,7 @@ use rustc::hir::map::Node; use rustc::session::Session; use rustc::ty::{self, TyCtxt}; +use std::collections::HashSet; use std::path::Path; use syntax::ast::{self, NodeId, PatKind, Attribute, CRATE_NODE_ID}; @@ -74,6 +75,7 @@ pub struct DumpVisitor<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> { // we only write one macro def per unique macro definition, and // one macro use per unique callsite span. // mac_defs: HashSet, + macro_calls: HashSet, } impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { @@ -89,6 +91,7 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { span: span_utils.clone(), cur_scope: CRATE_NODE_ID, // mac_defs: HashSet::new(), + macro_calls: HashSet::new(), } } @@ -972,11 +975,19 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { /// callsite spans to record macro definition and use data, using the /// mac_uses and mac_defs sets to prevent multiples. 
fn process_macro_use(&mut self, span: Span) { + let source_span = span.source_callsite(); + if self.macro_calls.contains(&source_span) { + return; + } + self.macro_calls.insert(source_span); + let data = match self.save_ctxt.get_macro_use_data(span) { None => return, Some(data) => data, }; + self.dumper.macro_use(data); + // FIXME write the macro def // let mut hasher = DefaultHasher::new(); // data.callee_span.hash(&mut hasher); @@ -996,7 +1007,6 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { // }.lower(self.tcx)); // } // } - self.dumper.macro_use(data); } fn process_trait_item(&mut self, trait_item: &'l ast::TraitItem, trait_id: DefId) { From ccdfd7f7e6225a3e455ae3633cf8c5915a0b3529 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Mon, 24 Jul 2017 21:28:32 +0000 Subject: [PATCH 012/213] Add mradds to the powerpc intrinsics --- src/etc/platform-intrinsics/powerpc.json | 7 +++++++ src/librustc_platform_intrinsics/powerpc.rs | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/src/etc/platform-intrinsics/powerpc.json b/src/etc/platform-intrinsics/powerpc.json index 5a7e986b532c..34a4dea7744b 100644 --- a/src/etc/platform-intrinsics/powerpc.json +++ b/src/etc/platform-intrinsics/powerpc.json @@ -16,6 +16,13 @@ "llvm": "vperm", "ret": "s32", "args": ["0", "0", "s8"] + }, + { + "intrinsic": "mradds", + "width": [128], + "llvm": "vmhraddshs", + "ret": "s16", + "args": ["0", "0", "0"] } ] } diff --git a/src/librustc_platform_intrinsics/powerpc.rs b/src/librustc_platform_intrinsics/powerpc.rs index 31b642b4055b..90e95ec6ae15 100644 --- a/src/librustc_platform_intrinsics/powerpc.rs +++ b/src/librustc_platform_intrinsics/powerpc.rs @@ -27,6 +27,11 @@ pub fn find(name: &str) -> Option { output: &::I32x4, definition: Named("llvm.ppc.altivec.vperm") }, + "_vec_mradds" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: 
Named("llvm.ppc.altivec.vmhraddshs") + }, _ => return None, }) } From e9c55d1f79160ab2c8d34ebf0a7533bf426d8b95 Mon Sep 17 00:00:00 2001 From: Ian Douglas Scott Date: Tue, 25 Jul 2017 16:44:25 -0700 Subject: [PATCH 013/213] Correct 'stable' attribute --- src/libstd/sys/redox/ext/io.rs | 6 +++--- src/libstd/sys/unix/ext/io.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/libstd/sys/redox/ext/io.rs b/src/libstd/sys/redox/ext/io.rs index 9723b42354ed..c4d99568c55c 100644 --- a/src/libstd/sys/redox/ext/io.rs +++ b/src/libstd/sys/redox/ext/io.rs @@ -110,17 +110,17 @@ impl AsRawFd for net::UdpSocket { } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "asraw_stdio", since = "1.21.0")] impl AsRawFd for io::Stdin { fn as_raw_fd(&self) -> RawFd { 0 } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "asraw_stdio", since = "1.21.0")] impl AsRawFd for io::Stdout { fn as_raw_fd(&self) -> RawFd { 1 } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "asraw_stdio", since = "1.21.0")] impl AsRawFd for io::Stderr { fn as_raw_fd(&self) -> RawFd { 2 } } diff --git a/src/libstd/sys/unix/ext/io.rs b/src/libstd/sys/unix/ext/io.rs index 6fb0ef9c4fbe..a0323d933d65 100644 --- a/src/libstd/sys/unix/ext/io.rs +++ b/src/libstd/sys/unix/ext/io.rs @@ -106,17 +106,17 @@ impl AsRawFd for net::UdpSocket { fn as_raw_fd(&self) -> RawFd { *self.as_inner().socket().as_inner() } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "asraw_stdio", since = "1.21.0")] impl AsRawFd for io::Stdin { fn as_raw_fd(&self) -> RawFd { libc::STDIN_FILENO } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "asraw_stdio", since = "1.21.0")] impl AsRawFd for io::Stdout { fn as_raw_fd(&self) -> RawFd { libc::STDOUT_FILENO } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "asraw_stdio", since = "1.21.0")] impl AsRawFd for io::Stderr { fn as_raw_fd(&self) -> RawFd { 
libc::STDERR_FILENO } } From 4f6c03e24394131f2cd50eab8002cf40274cbf0d Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Wed, 26 Jul 2017 09:58:17 +0000 Subject: [PATCH 014/213] Add Vector Compare Bounds Floating-Point --- src/etc/platform-intrinsics/powerpc.json | 10 +++++++++- src/librustc_platform_intrinsics/powerpc.rs | 5 +++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/etc/platform-intrinsics/powerpc.json b/src/etc/platform-intrinsics/powerpc.json index 34a4dea7744b..32648c96e4c9 100644 --- a/src/etc/platform-intrinsics/powerpc.json +++ b/src/etc/platform-intrinsics/powerpc.json @@ -4,7 +4,8 @@ "llvm_prefix": "llvm.ppc.altivec.", "number_info": { "unsigned": {}, - "signed": {} + "signed": {}, + "float": {} }, "width_info": { "128": { "width": "" } @@ -23,6 +24,13 @@ "llvm": "vmhraddshs", "ret": "s16", "args": ["0", "0", "0"] + }, + { + "intrinsic": "cmpb", + "width": [128], + "llvm": "vcmpbfp", + "ret": "s32", + "args": ["f32", "f32"] } ] } diff --git a/src/librustc_platform_intrinsics/powerpc.rs b/src/librustc_platform_intrinsics/powerpc.rs index 90e95ec6ae15..e2c5a2034958 100644 --- a/src/librustc_platform_intrinsics/powerpc.rs +++ b/src/librustc_platform_intrinsics/powerpc.rs @@ -32,6 +32,11 @@ pub fn find(name: &str) -> Option { output: &::I16x8, definition: Named("llvm.ppc.altivec.vmhraddshs") }, + "_vec_cmpb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vcmpbfp") + }, _ => return None, }) } From e2b5a6b3bc4fcebbd1909186ae0398e8d2c29521 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Wed, 26 Jul 2017 09:58:17 +0000 Subject: [PATCH 015/213] Add Vector Compare Equal --- src/etc/platform-intrinsics/powerpc.json | 15 +++++++++++++-- src/librustc_platform_intrinsics/powerpc.rs | 15 +++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/src/etc/platform-intrinsics/powerpc.json 
b/src/etc/platform-intrinsics/powerpc.json index 32648c96e4c9..80ec7e956f85 100644 --- a/src/etc/platform-intrinsics/powerpc.json +++ b/src/etc/platform-intrinsics/powerpc.json @@ -3,8 +3,12 @@ "intrinsic_prefix": "_vec_", "llvm_prefix": "llvm.ppc.altivec.", "number_info": { - "unsigned": {}, - "signed": {}, + "unsigned": { + "data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" } + }, + "signed": { + "data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" } + }, "float": {} }, "width_info": { @@ -31,6 +35,13 @@ "llvm": "vcmpbfp", "ret": "s32", "args": ["f32", "f32"] + }, + { + "intrinsic": "cmpeq{0.data_type_short}", + "width": [128], + "llvm": "vcmpequ{0.data_type_short}", + "ret": "s(8-32)", + "args": ["0", "0"] } ] } diff --git a/src/librustc_platform_intrinsics/powerpc.rs b/src/librustc_platform_intrinsics/powerpc.rs index e2c5a2034958..3fa248d75d90 100644 --- a/src/librustc_platform_intrinsics/powerpc.rs +++ b/src/librustc_platform_intrinsics/powerpc.rs @@ -37,6 +37,21 @@ pub fn find(name: &str) -> Option { output: &::I32x4, definition: Named("llvm.ppc.altivec.vcmpbfp") }, + "_vec_cmpeqb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vcmpequb") + }, + "_vec_cmpeqh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vcmpequh") + }, + "_vec_cmpeqw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vcmpequw") + }, _ => return None, }) } From d721c1f9e3caf1f4a0e0afb0151ec127d4fd2771 Mon Sep 17 00:00:00 2001 From: Florian Zeitz Date: Wed, 26 Jul 2017 16:23:07 +0200 Subject: [PATCH 016/213] trans: Reorder basic blocks in slice_for_each This is mainly for readability of the generated LLVM IR and subsequently assembly. 
There is a slight positive performance impact, likely due to I-cache effects. --- src/librustc_trans/tvec.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 4216a73a8dd8..de4d217c7353 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -30,8 +30,8 @@ pub fn slice_for_each<'a, 'tcx, F>( }; let body_bcx = bcx.build_sibling_block("slice_loop_body"); - let next_bcx = bcx.build_sibling_block("slice_loop_next"); let header_bcx = bcx.build_sibling_block("slice_loop_header"); + let next_bcx = bcx.build_sibling_block("slice_loop_next"); let start = if zst { C_uint(bcx.ccx, 0usize) From ac43d58d3aca4b578864ec6dbb24d68a9f9c201c Mon Sep 17 00:00:00 2001 From: Florian Zeitz Date: Wed, 26 Jul 2017 16:27:25 +0200 Subject: [PATCH 017/213] trans: Optimize initialization using repeat expressions This elides initialization for zero-sized arrays: * for zero-sized elements we previously emitted an empty loop * for arrays with a length of zero we previously emitted a loop with zero iterations This emits llvm.memset() instead of a loop over each element when: * all elements are zero integers * elements are byte sized --- src/librustc_trans/common.rs | 2 +- src/librustc_trans/mir/rvalue.rs | 41 ++++++++++++++++-- src/test/codegen/slice-init.rs | 74 ++++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+), 4 deletions(-) create mode 100644 src/test/codegen/slice-init.rs diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 9b0803908b16..c5f69bd638f3 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -372,7 +372,7 @@ pub fn const_to_uint(v: ValueRef) -> u64 { } } -fn is_const_integral(v: ValueRef) -> bool { +pub fn is_const_integral(v: ValueRef) -> bool { unsafe { !llvm::LLVMIsAConstantInt(v).is_null() } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 4bd5091a4f35..2cae2150885a 100644 
--- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{Layout, LayoutTyper}; +use rustc::ty::layout::{self, Layout, LayoutTyper, Primitive}; use rustc::mir::tcx::LvalueTy; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; @@ -20,7 +20,7 @@ use base; use builder::Builder; use callee; use common::{self, val_ty, C_bool, C_null, C_uint}; -use common::{C_integral}; +use common::{C_integral, C_i32}; use adt; use machine; use monomorphize; @@ -93,12 +93,47 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::Rvalue::Repeat(ref elem, ref count) => { + let dest_ty = dest.ty.to_ty(bcx.tcx()); + + // No need to inizialize memory of a zero-sized slice + if common::type_is_zero_size(bcx.ccx, dest_ty) { + return bcx; + } + let tr_elem = self.trans_operand(&bcx, elem); let size = count.as_u64(bcx.tcx().sess.target.uint_type); let size = C_uint(bcx.ccx, size); let base = base::get_dataptr(&bcx, dest.llval); + let align = dest.alignment.to_align(); + + if let OperandValue::Immediate(v) = tr_elem.val { + if common::is_const_integral(v) && common::const_to_uint(v) == 0 { + let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); + let align = C_i32(bcx.ccx, align as i32); + let ty = type_of::type_of(bcx.ccx, dest_ty); + let size = machine::llsize_of(bcx.ccx, ty); + let fill = C_integral(Type::i8(bcx.ccx), 0, false); + base::call_memset(&bcx, base, fill, size, align, false); + return bcx; + } + } + + // Use llvm.memset.p0i8.* to initialize byte arrays + let elem_layout = bcx.ccx.layout_of(tr_elem.ty).layout; + match *elem_layout { + Layout::Scalar { value: Primitive::Int(layout::I8), .. } | + Layout::CEnum { discr: layout::I8, .. 
} => { + let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); + let align = C_i32(bcx.ccx, align as i32); + let fill = tr_elem.immediate(); + base::call_memset(&bcx, base, fill, size, align, false); + return bcx; + } + _ => () + } + tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| { - self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem); + self.store_operand(bcx, llslot, align, tr_elem); bcx.br(loop_bb); }) } diff --git a/src/test/codegen/slice-init.rs b/src/test/codegen/slice-init.rs new file mode 100644 index 000000000000..cb684af39536 --- /dev/null +++ b/src/test/codegen/slice-init.rs @@ -0,0 +1,74 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// compile-flags: -C no-prepopulate-passes + +#![crate_type = "lib"] + +// CHECK-LABEL: @zero_sized_elem +#[no_mangle] +pub fn zero_sized_elem() { + // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: call void @llvm.memset.p0i8 + let x = [(); 4]; + drop(&x); +} + +// CHECK-LABEL: @zero_len_array +#[no_mangle] +pub fn zero_len_array() { + // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: call void @llvm.memset.p0i8 + let x = [4; 0]; + drop(&x); +} + +// CHECK-LABEL: @byte_array +#[no_mangle] +pub fn byte_array() { + // CHECK: call void @llvm.memset.p0i8.i{{[0-9]+}}(i8* {{.*}}, i8 7, i64 4 + // CHECK-NOT: br label %slice_loop_header{{.*}} + let x = [7u8; 4]; + drop(&x); +} + +#[allow(dead_code)] +#[derive(Copy, Clone)] +enum Init { + Loop, + Memset, +} + +// CHECK-LABEL: @byte_enum_array +#[no_mangle] +pub fn byte_enum_array() { + // CHECK: call void @llvm.memset.p0i8.i{{[0-9]+}}(i8* {{.*}}, i8 {{.*}}, i64 4 + // CHECK-NOT: br label %slice_loop_header{{.*}} + let x = [Init::Memset; 4]; + drop(&x); +} + +// CHECK-LABEL: @zeroed_integer_array +#[no_mangle] +pub fn zeroed_integer_array() { + // CHECK: call void @llvm.memset.p0i8.i{{[0-9]+}}(i8* {{.*}}, i8 0, i64 16 + // CHECK-NOT: br label %slice_loop_header{{.*}} + let x = [0u32; 4]; + drop(&x); +} + +// CHECK-LABEL: @nonzero_integer_array +#[no_mangle] +pub fn nonzero_integer_array() { + // CHECK: br label %slice_loop_header{{.*}} + // CHECK-NOT: call void @llvm.memset.p0i8 + let x = [0x1a_2b_3c_4d_u32; 4]; + drop(&x); +} From a1995d3973e6ac28c9356d3beb53fba410aebf83 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Wed, 26 Jul 2017 09:58:17 +0000 Subject: [PATCH 018/213] Add Vector Compare Greater-Than --- src/etc/platform-intrinsics/powerpc.json | 16 +++++++++++ src/librustc_platform_intrinsics/powerpc.rs | 30 +++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/src/etc/platform-intrinsics/powerpc.json b/src/etc/platform-intrinsics/powerpc.json index 
80ec7e956f85..55a199945f16 100644 --- a/src/etc/platform-intrinsics/powerpc.json +++ b/src/etc/platform-intrinsics/powerpc.json @@ -4,9 +4,11 @@ "llvm_prefix": "llvm.ppc.altivec.", "number_info": { "unsigned": { + "kind" : "u", "data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" } }, "signed": { + "kind" : "s", "data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" } }, "float": {} @@ -42,6 +44,20 @@ "llvm": "vcmpequ{0.data_type_short}", "ret": "s(8-32)", "args": ["0", "0"] + }, + { + "intrinsic": "cmpgt{1.kind}{1.data_type_short}", + "width": [128], + "llvm": "vcmpgt{1.kind}{1.data_type_short}", + "ret": "s(8-32)", + "args": ["0u", "1"] + }, + { + "intrinsic": "cmpgt{1.kind}{1.data_type_short}", + "width": [128], + "llvm": "vcmpgt{1.kind}{1.data_type_short}", + "ret": "s(8-32)", + "args": ["0", "1"] } ] } diff --git a/src/librustc_platform_intrinsics/powerpc.rs b/src/librustc_platform_intrinsics/powerpc.rs index 3fa248d75d90..4be738eed81f 100644 --- a/src/librustc_platform_intrinsics/powerpc.rs +++ b/src/librustc_platform_intrinsics/powerpc.rs @@ -52,6 +52,36 @@ pub fn find(name: &str) -> Option { output: &::I32x4, definition: Named("llvm.ppc.altivec.vcmpequw") }, + "_vec_cmpgtub" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vcmpgtub") + }, + "_vec_cmpgtuh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vcmpgtuh") + }, + "_vec_cmpgtuw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vcmpgtuw") + }, + "_vec_cmpgtsb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vcmpgtsb") + }, + "_vec_cmpgtsh" => Intrinsic { + inputs: { 
static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vcmpgtsh") + }, + "_vec_cmpgtsw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vcmpgtsw") + }, _ => return None, }) } From ade5ead38e3ce4565e9b3a1fc06733c336114044 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Thu, 27 Jul 2017 13:30:51 +0000 Subject: [PATCH 019/213] Add support for the Power8 and Power9 features on PowerPC --- src/librustc_trans/llvm_util.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/librustc_trans/llvm_util.rs b/src/librustc_trans/llvm_util.rs index 99ab1c47bed3..448feb5259dd 100644 --- a/src/librustc_trans/llvm_util.rs +++ b/src/librustc_trans/llvm_util.rs @@ -80,7 +80,10 @@ const X86_WHITELIST: &'static [&'static str] = &["avx\0", "avx2\0", "bmi\0", "bm const HEXAGON_WHITELIST: &'static [&'static str] = &["hvx\0", "hvx-double\0"]; -const POWERPC_WHITELIST: &'static [&'static str] = &["altivec\0", "vsx\0"]; +const POWERPC_WHITELIST: &'static [&'static str] = &["altivec\0", + "power8-altivec\0", "power9-altivec\0", + "power8-vector\0", "power9-vector\0", + "vsx\0"]; pub fn target_features(sess: &Session) -> Vec { let target_machine = create_target_machine(sess); From a718c813ed8546b0959de78aae22846c1ea9a783 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Thu, 27 Jul 2017 13:30:51 +0000 Subject: [PATCH 020/213] Add support for Vector Maximum on PowerPC --- src/etc/platform-intrinsics/powerpc.json | 7 +++++ src/librustc_platform_intrinsics/powerpc.rs | 30 +++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/src/etc/platform-intrinsics/powerpc.json b/src/etc/platform-intrinsics/powerpc.json index 55a199945f16..aaab224297ee 100644 --- a/src/etc/platform-intrinsics/powerpc.json +++ b/src/etc/platform-intrinsics/powerpc.json @@ -58,6 +58,13 @@ "llvm": 
"vcmpgt{1.kind}{1.data_type_short}", "ret": "s(8-32)", "args": ["0", "1"] + }, + { + "intrinsic": "max{0.kind}{0.data_type_short}", + "width": [128], + "llvm": "vmax{0.kind}{0.data_type_short}", + "ret": "i(8-32)", + "args": ["0", "0"] } ] } diff --git a/src/librustc_platform_intrinsics/powerpc.rs b/src/librustc_platform_intrinsics/powerpc.rs index 4be738eed81f..327299a0d059 100644 --- a/src/librustc_platform_intrinsics/powerpc.rs +++ b/src/librustc_platform_intrinsics/powerpc.rs @@ -82,6 +82,36 @@ pub fn find(name: &str) -> Option { output: &::I32x4, definition: Named("llvm.ppc.altivec.vcmpgtsw") }, + "_vec_maxsb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vmaxsb") + }, + "_vec_maxub" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.ppc.altivec.vmaxub") + }, + "_vec_maxsh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vmaxsh") + }, + "_vec_maxuh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.ppc.altivec.vmaxuh") + }, + "_vec_maxsw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vmaxsw") + }, + "_vec_maxuw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.ppc.altivec.vmaxuw") + }, _ => return None, }) } From c9bdd518eb64c0072dae0df01ce67fedf728adb4 Mon Sep 17 00:00:00 2001 From: QuietMisdreavus Date: Thu, 27 Jul 2017 13:26:24 -0500 Subject: [PATCH 021/213] add [src] links to associated functions inside an impl block --- src/librustdoc/html/render.rs | 10 +++++++++- 
src/librustdoc/html/static/rustdoc.css | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index 9b1d256304dd..43cabc97119f 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -2939,7 +2939,15 @@ fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLi write!(w, "")?; render_assoc_item(w, item, link.anchor(&id), ItemType::Impl)?; write!(w, "")?; - render_stability_since_raw(w, item.stable_since(), outer_version)?; + if let Some(l) = (Item { cx, item }).src_href() { + write!(w, "")?; + write!(w, "
")?; + render_stability_since_raw(w, item.stable_since(), outer_version)?; + write!(w, "
[src]", + l, "goto source code")?; + } else { + render_stability_since_raw(w, item.stable_since(), outer_version)?; + } write!(w, "\n")?; } } diff --git a/src/librustdoc/html/static/rustdoc.css b/src/librustdoc/html/static/rustdoc.css index 9314f57359ac..858ef3bf411d 100644 --- a/src/librustdoc/html/static/rustdoc.css +++ b/src/librustdoc/html/static/rustdoc.css @@ -297,6 +297,10 @@ h3.impl > .out-of-band { font-size: 21px; } +h4.method > .out-of-band { + font-size: 19px; +} + h4 > code, h3 > code, .invisible > code { position: inherit; } From 7e70a63e615d399072c8b8c2054d8d61844240d6 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sun, 2 Jul 2017 01:37:47 +0200 Subject: [PATCH 022/213] Throw errors when doc comments are added where they're unused --- src/librustc/hir/mod.rs | 2 +- src/libsyntax/parse/parser.rs | 41 ++++++++++++++++++++--- src/test/compile-fail/issue-34222.rs | 2 +- src/test/compile-fail/useless_comment.rs | 26 ++++++++++++++ src/test/compile-fail/useless_comment2.rs | 25 ++++++++++++++ src/test/compile-fail/useless_comment3.rs | 22 ++++++++++++ 6 files changed, 111 insertions(+), 7 deletions(-) create mode 100644 src/test/compile-fail/useless_comment.rs create mode 100644 src/test/compile-fail/useless_comment2.rs create mode 100644 src/test/compile-fail/useless_comment3.rs diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index fd79ec3b6b92..1b14caad3c83 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -1679,7 +1679,7 @@ pub struct Item { #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] pub enum Item_ { - /// An`extern crate` item, with optional original crate name, + /// An `extern crate` item, with optional original crate name, /// /// e.g. 
`extern crate foo` or `extern crate foo_bar as foo` ItemExternCrate(Option), diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index af9a198b9830..047f4b979d98 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -2131,14 +2131,14 @@ impl<'a> Parser<'a> { } else { Ok(self.mk_expr(span, ExprKind::Tup(es), attrs)) } - }, + } token::OpenDelim(token::Brace) => { return self.parse_block_expr(lo, BlockCheckMode::Default, attrs); - }, - token::BinOp(token::Or) | token::OrOr => { + } + token::BinOp(token::Or) | token::OrOr => { let lo = self.span; return self.parse_lambda_expr(lo, CaptureBy::Ref, attrs); - }, + } token::OpenDelim(token::Bracket) => { self.bump(); @@ -2387,7 +2387,6 @@ impl<'a> Parser<'a> { pub fn parse_block_expr(&mut self, lo: Span, blk_mode: BlockCheckMode, outer_attrs: ThinVec) -> PResult<'a, P> { - self.expect(&token::OpenDelim(token::Brace))?; let mut attrs = outer_attrs; @@ -2421,6 +2420,12 @@ impl<'a> Parser<'a> { expr.map(|mut expr| { attrs.extend::>(expr.attrs.into()); expr.attrs = attrs; + if if let Some(ref doc) = expr.attrs.iter().find(|x| x.is_sugared_doc) { + self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); + true + } else { false } { + return expr; + } match expr.node { ExprKind::If(..) | ExprKind::IfLet(..) 
=> { if !expr.attrs.is_empty() { @@ -3105,6 +3110,9 @@ impl<'a> Parser<'a> { // `else` token already eaten pub fn parse_else_expr(&mut self) -> PResult<'a, P> { + if self.prev_token_kind == PrevTokenKind::DocComment { + return Err(self.span_fatal_err(self.span, Error::UselessDocComment)); + } if self.eat_keyword(keywords::If) { return self.parse_if_expr(ThinVec::new()); } else { @@ -3118,6 +3126,9 @@ impl<'a> Parser<'a> { span_lo: Span, mut attrs: ThinVec) -> PResult<'a, P> { // Parse: `for in ` + if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { + self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); + } let pat = self.parse_pat()?; self.expect_keyword(keywords::In)?; @@ -3133,6 +3144,9 @@ impl<'a> Parser<'a> { pub fn parse_while_expr(&mut self, opt_ident: Option, span_lo: Span, mut attrs: ThinVec) -> PResult<'a, P> { + if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { + self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); + } if self.token.is_keyword(keywords::Let) { return self.parse_while_let_expr(opt_ident, span_lo, attrs); } @@ -3161,6 +3175,9 @@ impl<'a> Parser<'a> { pub fn parse_loop_expr(&mut self, opt_ident: Option, span_lo: Span, mut attrs: ThinVec) -> PResult<'a, P> { + if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { + self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); + } let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let span = span_lo.to(body.span); @@ -3171,6 +3188,9 @@ impl<'a> Parser<'a> { pub fn parse_catch_expr(&mut self, span_lo: Span, mut attrs: ThinVec) -> PResult<'a, P> { + if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { + self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); + } let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); Ok(self.mk_expr(span_lo.to(body.span), ExprKind::Catch(body), attrs)) @@ -3178,6 +3198,9 @@ impl<'a> Parser<'a> { // `match` token already eaten fn 
parse_match_expr(&mut self, mut attrs: ThinVec) -> PResult<'a, P> { + if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { + self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); + } let match_span = self.prev_span; let lo = self.prev_span; let discriminant = self.parse_expr_res(RESTRICTION_NO_STRUCT_LITERAL, @@ -3215,6 +3238,9 @@ impl<'a> Parser<'a> { maybe_whole!(self, NtArm, |x| x); let attrs = self.parse_outer_attributes()?; + if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { + self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); + } let pats = self.parse_pats()?; let guard = if self.eat_keyword(keywords::If) { Some(self.parse_expr()?) @@ -3669,6 +3695,9 @@ impl<'a> Parser<'a> { /// Parse a local variable declaration fn parse_local(&mut self, attrs: ThinVec) -> PResult<'a, P> { + if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { + self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); + } let lo = self.span; let pat = self.parse_pat()?; @@ -4158,6 +4187,8 @@ impl<'a> Parser<'a> { stmts.push(stmt); } else if self.token == token::Eof { break; + } else if let token::DocComment(_) = self.token { + return Err(self.span_fatal_err(self.span, Error::UselessDocComment)); } else { // Found only `;` or `}`. continue; diff --git a/src/test/compile-fail/issue-34222.rs b/src/test/compile-fail/issue-34222.rs index 4609c0ccb1cf..d406f59d0a2c 100644 --- a/src/test/compile-fail/issue-34222.rs +++ b/src/test/compile-fail/issue-34222.rs @@ -13,6 +13,6 @@ #[rustc_error] fn main() { //~ ERROR compilation successful - /// crash + // crash let x = 0; } diff --git a/src/test/compile-fail/useless_comment.rs b/src/test/compile-fail/useless_comment.rs new file mode 100644 index 000000000000..a32988aff124 --- /dev/null +++ b/src/test/compile-fail/useless_comment.rs @@ -0,0 +1,26 @@ +// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +fn foo3() -> i32 { + let mut x = 12; + /// z //~ ERROR E0585 + while x < 1 { + /// x //~ ERROR E0585 + //~^ ERROR attributes on non-item statements and expressions are experimental + x += 1; + } + /// d //~ ERROR E0585 + return x; +} + +fn main() { + /// e //~ ERROR E0585 + foo3(); +} diff --git a/src/test/compile-fail/useless_comment2.rs b/src/test/compile-fail/useless_comment2.rs new file mode 100644 index 000000000000..52ac7b6a7694 --- /dev/null +++ b/src/test/compile-fail/useless_comment2.rs @@ -0,0 +1,25 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +fn foo() { + /// a //~ ERROR E0585 + let x = 12; + + /// b //~ ERROR E0585 + match x { + /// c //~ ERROR E0585 + 1 => {}, + _ => {} + } +} + +fn main() { + foo(); +} \ No newline at end of file diff --git a/src/test/compile-fail/useless_comment3.rs b/src/test/compile-fail/useless_comment3.rs new file mode 100644 index 000000000000..c26031b5eb64 --- /dev/null +++ b/src/test/compile-fail/useless_comment3.rs @@ -0,0 +1,22 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +fn foo() { + let x = 13; + /// x //~ ERROR E0585 + if x == 12 { + /// y + println!("hello"); + } +} + +fn main() { + foo(); +} From b68a03bded5380278ae162bacc79d926e41fd76c Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Mon, 3 Jul 2017 00:27:36 +0200 Subject: [PATCH 023/213] Change doc comment to code comment --- src/librustc/middle/region.rs | 8 +++---- src/librustc_typeck/check/wfcheck.rs | 34 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index 6455d7ecf85a..39cb5d1b8c8e 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -458,10 +458,10 @@ impl<'tcx> RegionMaps { -> CodeExtent { if scope_a == scope_b { return scope_a; } - /// [1] The initial values for `a_buf` and `b_buf` are not used. - /// The `ancestors_of` function will return some prefix that - /// is re-initialized with new values (or else fallback to a - /// heap-allocated vector). + // [1] The initial values for `a_buf` and `b_buf` are not used. + // The `ancestors_of` function will return some prefix that + // is re-initialized with new values (or else fallback to a + // heap-allocated vector). let mut a_buf: [CodeExtent; 32] = [scope_a /* [1] */; 32]; let mut a_vec: Vec = vec![]; let mut b_buf: [CodeExtent; 32] = [scope_b /* [1] */; 32]; diff --git a/src/librustc_typeck/check/wfcheck.rs b/src/librustc_typeck/check/wfcheck.rs index 69cd14146287..cf5882bb9bdb 100644 --- a/src/librustc_typeck/check/wfcheck.rs +++ b/src/librustc_typeck/check/wfcheck.rs @@ -89,23 +89,23 @@ impl<'a, 'gcx> CheckTypeWellFormedVisitor<'a, 'gcx> { tcx.item_path_str(tcx.hir.local_def_id(item.id))); match item.node { - /// Right now we check that every default trait implementation - /// has an implementation of itself. Basically, a case like: - /// - /// `impl Trait for T {}` - /// - /// has a requirement of `T: Trait` which was required for default - /// method implementations. 
Although this could be improved now that - /// there's a better infrastructure in place for this, it's being left - /// for a follow-up work. - /// - /// Since there's such a requirement, we need to check *just* positive - /// implementations, otherwise things like: - /// - /// impl !Send for T {} - /// - /// won't be allowed unless there's an *explicit* implementation of `Send` - /// for `T` + // Right now we check that every default trait implementation + // has an implementation of itself. Basically, a case like: + // + // `impl Trait for T {}` + // + // has a requirement of `T: Trait` which was required for default + // method implementations. Although this could be improved now that + // there's a better infrastructure in place for this, it's being left + // for a follow-up work. + // + // Since there's such a requirement, we need to check *just* positive + // implementations, otherwise things like: + // + // impl !Send for T {} + // + // won't be allowed unless there's an *explicit* implementation of `Send` + // for `T` hir::ItemImpl(_, hir::ImplPolarity::Positive, _, _, ref trait_ref, ref self_ty, _) => { self.check_impl(item, self_ty, trait_ref); From 1cebf98e4c2654548e764e937e0b712220ffb600 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sun, 16 Jul 2017 00:17:35 +0200 Subject: [PATCH 024/213] Make a lint instead --- src/librustc/hir/mod.rs | 7 ++++ src/librustc_lint/builtin.rs | 40 +++++++++++++++++++++++ src/librustc_lint/lib.rs | 1 + src/libsyntax/ast.rs | 7 ++++ src/libsyntax/parse/parser.rs | 32 ------------------ src/test/compile-fail/useless_comment.rs | 28 +++++++++------- src/test/compile-fail/useless_comment2.rs | 25 -------------- src/test/compile-fail/useless_comment3.rs | 22 ------------- 8 files changed, 71 insertions(+), 91 deletions(-) delete mode 100644 src/test/compile-fail/useless_comment2.rs delete mode 100644 src/test/compile-fail/useless_comment3.rs diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index 
1b14caad3c83..a3a133daa09c 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -892,6 +892,13 @@ impl Decl_ { DeclItem(_) => &[] } } + + pub fn is_local(&self) -> bool { + match *self { + Decl_::DeclLocal(_) => true, + _ => false, + } + } } /// represents one arm of a 'match' diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 02d68a41b4cc..ca30ed4a536e 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -722,6 +722,46 @@ impl EarlyLintPass for IllegalFloatLiteralPattern { } } +declare_lint! { + pub UNUSED_DOC_COMMENT, + Warn, + "detects doc comments that aren't used by rustdoc" +} + +#[derive(Copy, Clone)] +pub struct UnusedDocComment; + +impl LintPass for UnusedDocComment { + fn get_lints(&self) -> LintArray { + lint_array![UNUSED_DOC_COMMENT] + } +} + +impl UnusedDocComment { + fn warn_if_doc<'a, 'tcx, + I: Iterator, + C: LintContext<'tcx>>(&self, mut attrs: I, cx: &C) { + if let Some(attr) = attrs.find(|a| a.is_value_str() && a.check_name("doc")) { + cx.struct_span_lint(UNUSED_DOC_COMMENT, attr.span, "doc comment not used by rustdoc") + .emit(); + } + } +} + +impl EarlyLintPass for UnusedDocComment { + fn check_local(&mut self, cx: &EarlyContext, decl: &ast::Local) { + self.warn_if_doc(decl.attrs.iter(), cx); + } + + fn check_arm(&mut self, cx: &EarlyContext, arm: &ast::Arm) { + self.warn_if_doc(arm.attrs.iter(), cx); + } + + fn check_expr(&mut self, cx: &EarlyContext, expr: &ast::Expr) { + self.warn_if_doc(expr.attrs.iter(), cx); + } +} + declare_lint! 
{ pub UNCONDITIONAL_RECURSION, Warn, diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index 21dca7f6c61c..83c00c178a0b 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -111,6 +111,7 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { UnusedImportBraces, AnonymousParameters, IllegalFloatLiteralPattern, + UnusedDocComment, ); add_early_builtin_with_new!(sess, diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index f7d9d532062a..df3f68fd1c6c 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -718,6 +718,13 @@ impl Stmt { }; self } + + pub fn is_item(&self) -> bool { + match self.node { + StmtKind::Local(_) => true, + _ => false, + } + } } impl fmt::Debug for Stmt { diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 047f4b979d98..582f72e398d7 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -2420,12 +2420,6 @@ impl<'a> Parser<'a> { expr.map(|mut expr| { attrs.extend::>(expr.attrs.into()); expr.attrs = attrs; - if if let Some(ref doc) = expr.attrs.iter().find(|x| x.is_sugared_doc) { - self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); - true - } else { false } { - return expr; - } match expr.node { ExprKind::If(..) | ExprKind::IfLet(..) 
=> { if !expr.attrs.is_empty() { @@ -3110,9 +3104,6 @@ impl<'a> Parser<'a> { // `else` token already eaten pub fn parse_else_expr(&mut self) -> PResult<'a, P> { - if self.prev_token_kind == PrevTokenKind::DocComment { - return Err(self.span_fatal_err(self.span, Error::UselessDocComment)); - } if self.eat_keyword(keywords::If) { return self.parse_if_expr(ThinVec::new()); } else { @@ -3126,9 +3117,6 @@ impl<'a> Parser<'a> { span_lo: Span, mut attrs: ThinVec) -> PResult<'a, P> { // Parse: `for in ` - if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { - self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); - } let pat = self.parse_pat()?; self.expect_keyword(keywords::In)?; @@ -3144,9 +3132,6 @@ impl<'a> Parser<'a> { pub fn parse_while_expr(&mut self, opt_ident: Option, span_lo: Span, mut attrs: ThinVec) -> PResult<'a, P> { - if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { - self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); - } if self.token.is_keyword(keywords::Let) { return self.parse_while_let_expr(opt_ident, span_lo, attrs); } @@ -3175,9 +3160,6 @@ impl<'a> Parser<'a> { pub fn parse_loop_expr(&mut self, opt_ident: Option, span_lo: Span, mut attrs: ThinVec) -> PResult<'a, P> { - if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { - self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); - } let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let span = span_lo.to(body.span); @@ -3188,9 +3170,6 @@ impl<'a> Parser<'a> { pub fn parse_catch_expr(&mut self, span_lo: Span, mut attrs: ThinVec) -> PResult<'a, P> { - if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { - self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); - } let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); Ok(self.mk_expr(span_lo.to(body.span), ExprKind::Catch(body), attrs)) @@ -3198,9 +3177,6 @@ impl<'a> Parser<'a> { // `match` token already eaten fn 
parse_match_expr(&mut self, mut attrs: ThinVec) -> PResult<'a, P> { - if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { - self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); - } let match_span = self.prev_span; let lo = self.prev_span; let discriminant = self.parse_expr_res(RESTRICTION_NO_STRUCT_LITERAL, @@ -3238,9 +3214,6 @@ impl<'a> Parser<'a> { maybe_whole!(self, NtArm, |x| x); let attrs = self.parse_outer_attributes()?; - if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { - self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); - } let pats = self.parse_pats()?; let guard = if self.eat_keyword(keywords::If) { Some(self.parse_expr()?) @@ -3695,9 +3668,6 @@ impl<'a> Parser<'a> { /// Parse a local variable declaration fn parse_local(&mut self, attrs: ThinVec) -> PResult<'a, P> { - if let Some(doc) = attrs.iter().find(|x| x.is_sugared_doc) { - self.span_fatal_err(doc.span, Error::UselessDocComment).emit(); - } let lo = self.span; let pat = self.parse_pat()?; @@ -4187,8 +4157,6 @@ impl<'a> Parser<'a> { stmts.push(stmt); } else if self.token == token::Eof { break; - } else if let token::DocComment(_) = self.token { - return Err(self.span_fatal_err(self.span, Error::UselessDocComment)); } else { // Found only `;` or `}`. continue; diff --git a/src/test/compile-fail/useless_comment.rs b/src/test/compile-fail/useless_comment.rs index a32988aff124..bceec186120a 100644 --- a/src/test/compile-fail/useless_comment.rs +++ b/src/test/compile-fail/useless_comment.rs @@ -8,19 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-fn foo3() -> i32 { - let mut x = 12; - /// z //~ ERROR E0585 - while x < 1 { - /// x //~ ERROR E0585 - //~^ ERROR attributes on non-item statements and expressions are experimental - x += 1; +#![deny(unused_doc_comment)] + +fn foo() { + /// a //~ ERROR unused doc comment + let x = 12; + + /// b //~ ERROR unused doc comment + match x { + /// c //~ ERROR unused doc comment + 1 => {}, + _ => {} } - /// d //~ ERROR E0585 - return x; + + /// foo //~ ERROR unused doc comment + unsafe {} } fn main() { - /// e //~ ERROR E0585 - foo3(); -} + foo(); +} \ No newline at end of file diff --git a/src/test/compile-fail/useless_comment2.rs b/src/test/compile-fail/useless_comment2.rs deleted file mode 100644 index 52ac7b6a7694..000000000000 --- a/src/test/compile-fail/useless_comment2.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -fn foo() { - /// a //~ ERROR E0585 - let x = 12; - - /// b //~ ERROR E0585 - match x { - /// c //~ ERROR E0585 - 1 => {}, - _ => {} - } -} - -fn main() { - foo(); -} \ No newline at end of file diff --git a/src/test/compile-fail/useless_comment3.rs b/src/test/compile-fail/useless_comment3.rs deleted file mode 100644 index c26031b5eb64..000000000000 --- a/src/test/compile-fail/useless_comment3.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -fn foo() { - let x = 13; - /// x //~ ERROR E0585 - if x == 12 { - /// y - println!("hello"); - } -} - -fn main() { - foo(); -} From 2f2623b79db33fc3bcb8bd165c549b5bba1ee240 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Thu, 27 Jul 2017 20:40:20 +0200 Subject: [PATCH 025/213] Update tests --- src/test/compile-fail/issue-34222.rs | 18 ------------------ src/test/compile-fail/useless_comment.rs | 8 ++++---- 2 files changed, 4 insertions(+), 22 deletions(-) delete mode 100644 src/test/compile-fail/issue-34222.rs diff --git a/src/test/compile-fail/issue-34222.rs b/src/test/compile-fail/issue-34222.rs deleted file mode 100644 index d406f59d0a2c..000000000000 --- a/src/test/compile-fail/issue-34222.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![feature(rustc_attrs)] -#![allow(warnings)] - -#[rustc_error] -fn main() { //~ ERROR compilation successful - // crash - let x = 0; -} diff --git a/src/test/compile-fail/useless_comment.rs b/src/test/compile-fail/useless_comment.rs index bceec186120a..a1172bb214d0 100644 --- a/src/test/compile-fail/useless_comment.rs +++ b/src/test/compile-fail/useless_comment.rs @@ -11,17 +11,17 @@ #![deny(unused_doc_comment)] fn foo() { - /// a //~ ERROR unused doc comment + /// a //~ ERROR doc comment not used by rustdoc let x = 12; - /// b //~ ERROR unused doc comment + /// b //~ doc comment not used by rustdoc match x { - /// c //~ ERROR unused doc comment + /// c //~ ERROR doc comment not used by rustdoc 1 => {}, _ => {} } - /// foo //~ ERROR unused doc comment + /// foo //~ ERROR doc comment not used by rustdoc unsafe {} } From e268ddf52d7b52b0633572dacdf628c17978f1df Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Thu, 27 Jul 2017 23:12:08 +0300 Subject: [PATCH 026/213] erase types in the move-path abstract domain Leaving types unerased would lead to 2 types with a different "name" getting different move-paths, which would cause major brokenness (see e.g. #42903). This does not fix any *known* issue, but is required if we want to use abs_domain with non-erased regions (because the same can easily have different names). cc @RalfJung. 
--- src/librustc/ich/impls_mir.rs | 16 ++++++++------- src/librustc/mir/mod.rs | 20 +++++++++---------- .../dataflow/move_paths/abs_domain.rs | 11 ++++++++-- 3 files changed, 28 insertions(+), 19 deletions(-) diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index cb017b7f8864..6dadb702b9f2 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -258,10 +258,11 @@ impl<'a, 'gcx, 'tcx> HashStable> for mir::L } } -impl<'a, 'gcx, 'tcx, B, V> HashStable> -for mir::Projection<'tcx, B, V> +impl<'a, 'gcx, 'tcx, B, V, T> HashStable> +for mir::Projection<'tcx, B, V, T> where B: HashStable>, - V: HashStable> + V: HashStable>, + T: HashStable> { fn hash_stable(&self, hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, @@ -276,9 +277,10 @@ for mir::Projection<'tcx, B, V> } } -impl<'a, 'gcx, 'tcx, V> HashStable> -for mir::ProjectionElem<'tcx, V> - where V: HashStable> +impl<'a, 'gcx, 'tcx, V, T> HashStable> +for mir::ProjectionElem<'tcx, V, T> + where V: HashStable>, + T: HashStable> { fn hash_stable(&self, hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, @@ -286,7 +288,7 @@ for mir::ProjectionElem<'tcx, V> mem::discriminant(self).hash_stable(hcx, hasher); match *self { mir::ProjectionElem::Deref => {} - mir::ProjectionElem::Field(field, ty) => { + mir::ProjectionElem::Field(field, ref ty) => { field.hash_stable(hcx, hasher); ty.hash_stable(hcx, hasher); } diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index d78e17ce03ce..3dcd64af2ede 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -887,15 +887,15 @@ impl_stable_hash_for!(struct Static<'tcx> { /// shared between `Constant` and `Lvalue`. See the aliases /// `LvalueProjection` etc below. 
#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct Projection<'tcx, B, V> { +pub struct Projection<'tcx, B, V, T> { pub base: B, - pub elem: ProjectionElem<'tcx, V>, + pub elem: ProjectionElem<'tcx, V, T>, } #[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub enum ProjectionElem<'tcx, V> { +pub enum ProjectionElem<'tcx, V, T> { Deref, - Field(Field, Ty<'tcx>), + Field(Field, T), Index(V), /// These indices are generated by slice patterns. Easiest to explain @@ -932,11 +932,11 @@ pub enum ProjectionElem<'tcx, V> { /// Alias for projections as they appear in lvalues, where the base is an lvalue /// and the index is an operand. -pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Operand<'tcx>>; +pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Operand<'tcx>, Ty<'tcx>>; /// Alias for projections as they appear in lvalues, where the base is an lvalue /// and the index is an operand. -pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Operand<'tcx>>; +pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Operand<'tcx>, Ty<'tcx>>; newtype_index!(Field, "field"); @@ -1720,8 +1720,8 @@ impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> { } } -impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V> - where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx> +impl<'tcx, B, V, T> TypeFoldable<'tcx> for Projection<'tcx, B, V, T> + where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx>, T: TypeFoldable<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { use mir::ProjectionElem::*; @@ -1729,7 +1729,7 @@ impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V> let base = self.base.fold_with(folder); let elem = match self.elem { Deref => Deref, - Field(f, ty) => Field(f, ty.fold_with(folder)), + Field(f, ref ty) => Field(f, ty.fold_with(folder)), Index(ref v) => Index(v.fold_with(folder)), ref elem => elem.clone() }; @@ -1745,7 +1745,7 @@ 
impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V> self.base.visit_with(visitor) || match self.elem { - Field(_, ty) => ty.visit_with(visitor), + Field(_, ref ty) => ty.visit_with(visitor), Index(ref v) => v.visit_with(visitor), _ => false } diff --git a/src/librustc_mir/dataflow/move_paths/abs_domain.rs b/src/librustc_mir/dataflow/move_paths/abs_domain.rs index 5e61c2ec7a29..1255209322b0 100644 --- a/src/librustc_mir/dataflow/move_paths/abs_domain.rs +++ b/src/librustc_mir/dataflow/move_paths/abs_domain.rs @@ -23,11 +23,14 @@ use rustc::mir::LvalueElem; use rustc::mir::{Operand, ProjectionElem}; +use rustc::ty::Ty; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct AbstractOperand; +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct AbstractType; pub type AbstractElem<'tcx> = - ProjectionElem<'tcx, AbstractOperand>; + ProjectionElem<'tcx, AbstractOperand, AbstractType>; pub trait Lift { type Abstract; @@ -37,6 +40,10 @@ impl<'tcx> Lift for Operand<'tcx> { type Abstract = AbstractOperand; fn lift(&self) -> Self::Abstract { AbstractOperand } } +impl<'tcx> Lift for Ty<'tcx> { + type Abstract = AbstractType; + fn lift(&self) -> Self::Abstract { AbstractType } +} impl<'tcx> Lift for LvalueElem<'tcx> { type Abstract = AbstractElem<'tcx>; fn lift(&self) -> Self::Abstract { @@ -44,7 +51,7 @@ impl<'tcx> Lift for LvalueElem<'tcx> { ProjectionElem::Deref => ProjectionElem::Deref, ProjectionElem::Field(ref f, ty) => - ProjectionElem::Field(f.clone(), ty.clone()), + ProjectionElem::Field(f.clone(), ty.lift()), ProjectionElem::Index(ref i) => ProjectionElem::Index(i.lift()), ProjectionElem::Subslice {from, to} => From cbce0aa341c8be3f4b9253c93ed641ed454fc0a0 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Thu, 27 Jul 2017 13:30:51 +0000 Subject: [PATCH 027/213] Add support for Vector Minimum on PowerPC --- src/etc/platform-intrinsics/powerpc.json | 7 +++++ src/librustc_platform_intrinsics/powerpc.rs | 30 +++++++++++++++++++++ 2 files 
changed, 37 insertions(+) diff --git a/src/etc/platform-intrinsics/powerpc.json b/src/etc/platform-intrinsics/powerpc.json index aaab224297ee..7f01aaa3ac11 100644 --- a/src/etc/platform-intrinsics/powerpc.json +++ b/src/etc/platform-intrinsics/powerpc.json @@ -65,6 +65,13 @@ "llvm": "vmax{0.kind}{0.data_type_short}", "ret": "i(8-32)", "args": ["0", "0"] + }, + { + "intrinsic": "min{0.kind}{0.data_type_short}", + "width": [128], + "llvm": "vmin{0.kind}{0.data_type_short}", + "ret": "i(8-32)", + "args": ["0", "0"] } ] } diff --git a/src/librustc_platform_intrinsics/powerpc.rs b/src/librustc_platform_intrinsics/powerpc.rs index 327299a0d059..60074cce2b9a 100644 --- a/src/librustc_platform_intrinsics/powerpc.rs +++ b/src/librustc_platform_intrinsics/powerpc.rs @@ -112,6 +112,36 @@ pub fn find(name: &str) -> Option { output: &::U32x4, definition: Named("llvm.ppc.altivec.vmaxuw") }, + "_vec_minsb" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.ppc.altivec.vminsb") + }, + "_vec_minub" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.ppc.altivec.vminub") + }, + "_vec_minsh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.ppc.altivec.vminsh") + }, + "_vec_minuh" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.ppc.altivec.vminuh") + }, + "_vec_minsw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.ppc.altivec.vminsw") + }, + "_vec_minuw" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.ppc.altivec.vminuw") + }, _ => return 
None, }) } From efc67646fabbba0d79b386fb9fec2f89c20314bc Mon Sep 17 00:00:00 2001 From: Josh Stone Date: Thu, 27 Jul 2017 20:40:24 -0700 Subject: [PATCH 028/213] Support homogeneous aggregates for hard-float ARM Hard-float ARM targets use the AACPS-VFP ABI, which passes and returns homogeneous float/vector aggregates in the VFP registers. Fixes #43329. --- src/librustc_trans/cabi_arm.rs | 60 +++++++++++++++++++++++++++++++--- 1 file changed, 55 insertions(+), 5 deletions(-) diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs index 7a91cad511d6..635741b4d1ac 100644 --- a/src/librustc_trans/cabi_arm.rs +++ b/src/librustc_trans/cabi_arm.rs @@ -8,14 +8,50 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{FnType, ArgType, LayoutExt, Reg, Uniform}; +use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform}; use context::CrateContext; +use llvm::CallConv; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) + -> Option { + arg.layout.homogeneous_aggregate(ccx).and_then(|unit| { + let size = arg.layout.size(ccx); + + // Ensure we have at most four uniquely addressable members. 
+ if size > unit.size.checked_mul(4, ccx).unwrap() { + return None; + } + + let valid_unit = match unit.kind { + RegKind::Integer => false, + RegKind::Float => true, + RegKind::Vector => size.bits() == 64 || size.bits() == 128 + }; + + if valid_unit { + Some(Uniform { + unit, + total: size + }) + } else { + None + } + }) +} + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>, vfp: bool) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); return; } + + if vfp { + if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { + ret.cast_to(ccx, uniform); + return; + } + } + let size = ret.layout.size(ccx); let bits = size.bits(); if bits <= 32 { @@ -35,11 +71,19 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc ret.make_indirect(ccx); } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { +fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) { if !arg.layout.is_aggregate() { arg.extend_integer_width_to(32); return; } + + if vfp { + if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { + arg.cast_to(ccx, uniform); + return; + } + } + let align = arg.layout.align(ccx).abi(); let total = arg.layout.size(ccx); arg.cast_to(ccx, Uniform { @@ -49,12 +93,18 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + // If this is a target with a hard-float ABI, and the function is not explicitly + // `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates. 
+ let vfp = ccx.sess().target.target.llvm_target.ends_with("hf") + && fty.cconv != CallConv::ArmAapcsCallConv + && !fty.variadic; + if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, vfp); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(ccx, arg, vfp); } } From e1206c4b6711dc9c252a154c21b951243bf7944b Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Fri, 28 Jul 2017 15:09:12 +1200 Subject: [PATCH 029/213] save-anlaysis: fix filter_generated --- src/librustc_save_analysis/span_utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_save_analysis/span_utils.rs b/src/librustc_save_analysis/span_utils.rs index 77cde33e9620..660fe7dfa3d9 100644 --- a/src/librustc_save_analysis/span_utils.rs +++ b/src/librustc_save_analysis/span_utils.rs @@ -409,7 +409,7 @@ impl<'a> SpanUtils<'a> { // Otherwise, a generated span is deemed invalid if it is not a sub-span of the root // callsite. This filters out macro internal variables and most malformed spans. 
- !parent.source_callsite().contains(parent) + !parent.source_callsite().contains(sub_span.unwrap()) } } From bbc00c9e9c162fffddf7e8a4585aab10ecfc9d6d Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Fri, 28 Jul 2017 16:42:39 +1200 Subject: [PATCH 030/213] format!: use a dummy span rather than callee span for the span base for temporary variables --- src/libsyntax_ext/format.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/libsyntax_ext/format.rs b/src/libsyntax_ext/format.rs index 7351377e7711..9734bb867f1d 100644 --- a/src/libsyntax_ext/format.rs +++ b/src/libsyntax_ext/format.rs @@ -20,7 +20,7 @@ use syntax::ext::build::AstBuilder; use syntax::parse::token; use syntax::ptr::P; use syntax::symbol::{Symbol, keywords}; -use syntax_pos::Span; +use syntax_pos::{Span, DUMMY_SP}; use syntax::tokenstream; use std::collections::{HashMap, HashSet}; @@ -558,8 +558,10 @@ impl<'a, 'b> Context<'a, 'b> { // passed to this function. for (i, e) in self.args.into_iter().enumerate() { let name = self.ecx.ident_of(&format!("__arg{}", i)); - let span = - Span { ctxt: e.span.ctxt.apply_mark(self.ecx.current_expansion.mark), ..e.span }; + let span = Span { + ctxt: e.span.ctxt.apply_mark(self.ecx.current_expansion.mark), + ..DUMMY_SP + }; pats.push(self.ecx.pat_ident(span, name)); for ref arg_ty in self.arg_unique_types[i].iter() { locals.push(Context::format_arg(self.ecx, self.macsp, e.span, arg_ty, name)); From c4710203c098b68b5f80b1507e889ad894855729 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Fri, 28 Jul 2017 02:03:23 +0000 Subject: [PATCH 031/213] Make LLVMRustHasFeature more robust The function should accept feature strings that old LLVM might not support. Simplify the code using the same approach used by LLVMRustPrintTargetFeatures. Dummify the function for non 4.0 LLVM and update the tests accordingly. 
--- src/rustllvm/PassWrapper.cpp | 20 +++++++------------- src/test/run-make/print-cfg/Makefile | 2 +- src/test/run-pass/sse2.rs | 1 + 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/src/rustllvm/PassWrapper.cpp b/src/rustllvm/PassWrapper.cpp index 7fb1eafb30da..57e90be27748 100644 --- a/src/rustllvm/PassWrapper.cpp +++ b/src/rustllvm/PassWrapper.cpp @@ -181,20 +181,14 @@ extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM, TargetMachine *Target = unwrap(TM); const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo(); const FeatureBitset &Bits = MCInfo->getFeatureBits(); - const llvm::SubtargetFeatureKV *FeatureEntry; +#if LLVM_VERSION_GE(4, 0) + const ArrayRef FeatTable = MCInfo->getFeatureTable(); -#define SUBTARGET(x) \ - if (MCInfo->isCPUStringValid(x##SubTypeKV[0].Key)) { \ - FeatureEntry = x##FeatureKV; \ - } else - - GEN_SUBTARGETS { return false; } -#undef SUBTARGET - - while (strcmp(Feature, FeatureEntry->Key) != 0) - FeatureEntry++; - - return (Bits & FeatureEntry->Value) == FeatureEntry->Value; + for (auto &FeatureEntry : FeatTable) + if (!strcmp(FeatureEntry.Key, Feature)) + return (Bits & FeatureEntry.Value) == FeatureEntry.Value; +#endif + return false; } enum class LLVMRustCodeModel { diff --git a/src/test/run-make/print-cfg/Makefile b/src/test/run-make/print-cfg/Makefile index a820a463f4a0..82fa3f6a3c5e 100644 --- a/src/test/run-make/print-cfg/Makefile +++ b/src/test/run-make/print-cfg/Makefile @@ -5,7 +5,7 @@ all: default $(RUSTC) --target x86_64-pc-windows-gnu --print cfg | grep x86_64 $(RUSTC) --target i686-pc-windows-msvc --print cfg | grep msvc $(RUSTC) --target i686-apple-darwin --print cfg | grep macos - $(RUSTC) --target i686-unknown-linux-gnu --print cfg | grep sse2 + $(RUSTC) --target i686-unknown-linux-gnu --print cfg | grep gnu ifdef IS_WINDOWS default: diff --git a/src/test/run-pass/sse2.rs b/src/test/run-pass/sse2.rs index 8d88c17af79b..c27f83011cb1 100644 --- a/src/test/run-pass/sse2.rs +++ 
b/src/test/run-pass/sse2.rs @@ -7,6 +7,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. +// min-llvm-version 4.0 #![feature(cfg_target_feature)] From 9b2f7624ecb743e9db8e135113f396a7956623e7 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 10 Jul 2017 17:44:46 -0700 Subject: [PATCH 032/213] syntax: Add `tokens: Option` to Item This commit adds a new field to the `Item` AST node in libsyntax to optionally contain the original token stream that the item itself was parsed from. This is currently `None` everywhere but is intended for use later with procedural macros. --- src/librustc_metadata/cstore_impl.rs | 1 + src/libsyntax/ast.rs | 6 ++++++ src/libsyntax/diagnostics/plugin.rs | 1 + src/libsyntax/ext/build.rs | 6 ++++-- src/libsyntax/ext/expand.rs | 1 + src/libsyntax/ext/placeholders.rs | 1 + src/libsyntax/fold.rs | 8 ++++++-- src/libsyntax/parse/parser.rs | 1 + src/libsyntax/std_inject.rs | 2 ++ src/libsyntax/test.rs | 16 +++++++++++----- src/libsyntax_ext/global_asm.rs | 1 + 11 files changed, 35 insertions(+), 9 deletions(-) diff --git a/src/librustc_metadata/cstore_impl.rs b/src/librustc_metadata/cstore_impl.rs index 5b0612ddab60..25079613e586 100644 --- a/src/librustc_metadata/cstore_impl.rs +++ b/src/librustc_metadata/cstore_impl.rs @@ -389,6 +389,7 @@ impl CrateStore for cstore::CStore { legacy: def.legacy, }), vis: ast::Visibility::Inherited, + tokens: None, }) } diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index 4fc737873530..bd26ab5bd35a 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -1812,6 +1812,12 @@ pub struct Item { pub node: ItemKind, pub vis: Visibility, pub span: Span, + + /// Original tokens this item was parsed from. This isn't necessarily + /// available for all items, although over time more and more items should + /// have this be `Some`. Right now this is primarily used for procedural + /// macros, notably custom attributes. 
+ pub tokens: Option, } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] diff --git a/src/libsyntax/diagnostics/plugin.rs b/src/libsyntax/diagnostics/plugin.rs index 2a5de3c7382a..855f4cd35570 100644 --- a/src/libsyntax/diagnostics/plugin.rs +++ b/src/libsyntax/diagnostics/plugin.rs @@ -236,6 +236,7 @@ pub fn expand_build_diagnostic_array<'cx>(ecx: &'cx mut ExtCtxt, ), vis: ast::Visibility::Public, span: span, + tokens: None, }) ])) } diff --git a/src/libsyntax/ext/build.rs b/src/libsyntax/ext/build.rs index e004f7354ebd..de0538e38b3c 100644 --- a/src/libsyntax/ext/build.rs +++ b/src/libsyntax/ext/build.rs @@ -979,7 +979,8 @@ impl<'a> AstBuilder for ExtCtxt<'a> { id: ast::DUMMY_NODE_ID, node: node, vis: ast::Visibility::Inherited, - span: span + span: span, + tokens: None, }) } @@ -1147,7 +1148,8 @@ impl<'a> AstBuilder for ExtCtxt<'a> { attrs: vec![], node: ast::ItemKind::Use(vp), vis: vis, - span: sp + span: sp, + tokens: None, }) } diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index f6d56557166d..16c264e0f941 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -214,6 +214,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> { ident: keywords::Invalid.ident(), id: ast::DUMMY_NODE_ID, vis: ast::Visibility::Public, + tokens: None, }))); match self.expand(krate_item).make_items().pop().map(P::unwrap) { diff --git a/src/libsyntax/ext/placeholders.rs b/src/libsyntax/ext/placeholders.rs index 4fb138d506a8..9bea641b0336 100644 --- a/src/libsyntax/ext/placeholders.rs +++ b/src/libsyntax/ext/placeholders.rs @@ -46,6 +46,7 @@ pub fn placeholder(kind: ExpansionKind, id: ast::NodeId) -> Expansion { ExpansionKind::Items => Expansion::Items(SmallVector::one(P(ast::Item { id: id, span: span, ident: ident, vis: vis, attrs: attrs, node: ast::ItemKind::Mac(mac_placeholder()), + tokens: None, }))), ExpansionKind::TraitItems => Expansion::TraitItems(SmallVector::one(ast::TraitItem { id: id, span: span, ident: ident, 
attrs: attrs, diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs index 8c616df858a4..71802d0aa284 100644 --- a/src/libsyntax/fold.rs +++ b/src/libsyntax/fold.rs @@ -1000,6 +1000,7 @@ pub fn noop_fold_crate(Crate {module, attrs, span}: Crate, vis: ast::Visibility::Public, span: span, node: ast::ItemKind::Mod(module), + tokens: None, })).into_iter(); let (module, attrs, span) = match items.next() { @@ -1032,7 +1033,7 @@ pub fn noop_fold_item(i: P, folder: &mut T) -> SmallVector(Item {id, ident, attrs, node, vis, span}: Item, +pub fn noop_fold_item_simple(Item {id, ident, attrs, node, vis, span, tokens}: Item, folder: &mut T) -> Item { Item { id: folder.new_id(id), @@ -1040,7 +1041,10 @@ pub fn noop_fold_item_simple(Item {id, ident, attrs, node, vis, span} ident: folder.fold_ident(ident), attrs: fold_attrs(attrs, folder), node: folder.fold_item_kind(node), - span: folder.new_span(span) + span: folder.new_span(span), + tokens: tokens.map(|tokens| { + folder.fold_tts(tokens.into()).into() + }), } } diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index d1591a219b32..4f8d85a8da47 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -4653,6 +4653,7 @@ impl<'a> Parser<'a> { node: node, vis: vis, span: span, + tokens: None, // TODO: fill this in }) } diff --git a/src/libsyntax/std_inject.rs b/src/libsyntax/std_inject.rs index a8a9ae556f10..d9ed96f293a8 100644 --- a/src/libsyntax/std_inject.rs +++ b/src/libsyntax/std_inject.rs @@ -60,6 +60,7 @@ pub fn maybe_inject_crates_ref(mut krate: ast::Crate, alt_std_name: Option - folded.map(|ast::Item {id, ident, attrs, node, vis, span}| { + folded.map(|ast::Item {id, ident, attrs, node, vis, span, tokens}| { let allow_str = Symbol::intern("allow"); let dead_code_str = Symbol::intern("dead_code"); let word_vec = vec![attr::mk_list_word_item(dead_code_str)]; @@ -212,7 +212,8 @@ impl fold::Folder for EntryPointCleaner { .collect(), node: node, vis: vis, - span: span + span: 
span, + tokens: tokens, } }), EntryPointType::None | @@ -255,6 +256,7 @@ fn mk_reexport_mod(cx: &mut TestCtxt, node: ast::ItemKind::Mod(reexport_mod), vis: ast::Visibility::Public, span: DUMMY_SP, + tokens: None, })).pop().unwrap(); (it, sym) @@ -465,7 +467,8 @@ fn mk_std(cx: &TestCtxt) -> P { node: vi, attrs: vec![], vis: vis, - span: sp + span: sp, + tokens: None, }) } @@ -506,7 +509,8 @@ fn mk_main(cx: &mut TestCtxt) -> P { id: ast::DUMMY_NODE_ID, node: main, vis: ast::Visibility::Public, - span: sp + span: sp, + tokens: None, }) } @@ -536,6 +540,7 @@ fn mk_test_module(cx: &mut TestCtxt) -> (P, Option>) { node: item_, vis: ast::Visibility::Public, span: DUMMY_SP, + tokens: None, })).pop().unwrap(); let reexport = cx.reexport_test_harness_main.map(|s| { // building `use = __test::main` @@ -551,7 +556,8 @@ fn mk_test_module(cx: &mut TestCtxt) -> (P, Option>) { attrs: vec![], node: ast::ItemKind::Use(P(use_path)), vis: ast::Visibility::Inherited, - span: DUMMY_SP + span: DUMMY_SP, + tokens: None, })).pop().unwrap() }); diff --git a/src/libsyntax_ext/global_asm.rs b/src/libsyntax_ext/global_asm.rs index dc67e1c45f6e..8b0bb8cb891e 100644 --- a/src/libsyntax_ext/global_asm.rs +++ b/src/libsyntax_ext/global_asm.rs @@ -61,5 +61,6 @@ pub fn expand_global_asm<'cx>(cx: &'cx mut ExtCtxt, })), vis: ast::Visibility::Inherited, span: sp, + tokens: None, }))) } From 36f2816a1e070d33c27c07f176e692d87781d228 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 10 Jul 2017 17:49:18 -0700 Subject: [PATCH 033/213] proc_macro: Use an item's tokens if available This partly resolves the `FIXME` located in `src/libproc_macro/lib.rs` when interpreting interpolated tokens. All instances of `ast::Item` which have a list of tokens attached to them now use that list of tokens to losslessly get converted into a `TokenTree` instead of going through stringification and losing span information. 
cc #43081 --- src/libproc_macro/lib.rs | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs index 12ed4fba402b..0f0d4138062f 100644 --- a/src/libproc_macro/lib.rs +++ b/src/libproc_macro/lib.rs @@ -509,14 +509,26 @@ impl TokenTree { Ident(ident) | Lifetime(ident) => TokenNode::Term(Term(ident.name)), Literal(..) | DocComment(..) => TokenNode::Literal(self::Literal(token)), - Interpolated(ref nt) => __internal::with_sess(|(sess, _)| { - TokenNode::Group(Delimiter::None, TokenStream(nt.1.force(|| { - // FIXME(jseyfried): Avoid this pretty-print + reparse hack - let name = "".to_owned(); - let source = pprust::token_to_string(&token); - parse_stream_from_source_str(name, source, sess, Some(span)) - }))) - }), + Interpolated(ref nt) => { + let mut node = None; + if let Nonterminal::NtItem(ref item) = nt.0 { + if let Some(ref tokens) = item.tokens { + node = Some(TokenNode::Group(Delimiter::None, + TokenStream(tokens.clone()))); + } + } + + node.unwrap_or_else(|| { + __internal::with_sess(|(sess, _)| { + TokenNode::Group(Delimiter::None, TokenStream(nt.1.force(|| { + // FIXME(jseyfried): Avoid this pretty-print + reparse hack + let name = "".to_owned(); + let source = pprust::token_to_string(&token); + parse_stream_from_source_str(name, source, sess, Some(span)) + }))) + }) + }) + } OpenDelim(..) | CloseDelim(..) => unreachable!(), Whitespace | Comment | Shebang(..) | Eof => unreachable!(), From 036300aadd5b6eb309de32c1b07f57f3aa2a13cd Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 11 Jul 2017 08:29:08 -0700 Subject: [PATCH 034/213] Add a failing test for errors in proc macros This test currently fails because the tokenization of an AST item during the expansion of a procedural macro attribute rounds-trips through strings, losing span information. 
--- .../proc-macro/attribute-with-error.rs | 25 +++++++++++++++++++ .../auxiliary/attribute-with-error.rs | 24 ++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs create mode 100644 src/test/compile-fail-fulldeps/proc-macro/auxiliary/attribute-with-error.rs diff --git a/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs b/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs new file mode 100644 index 000000000000..a74343cda864 --- /dev/null +++ b/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs @@ -0,0 +1,25 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// aux-build:attribute-with-error.rs + +#![feature(proc_macro)] + +extern crate attribute_with_error; + +#[attribute_with_error::foo] +fn test() { + let a: i32 = "foo"; + //~^ ERROR: mismatched types +} + +fn main() { + test(); +} diff --git a/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attribute-with-error.rs b/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attribute-with-error.rs new file mode 100644 index 000000000000..85a7a0bf6336 --- /dev/null +++ b/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attribute-with-error.rs @@ -0,0 +1,24 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// force-host +// no-prefer-dynamic + +#![crate_type = "proc-macro"] +#![feature(proc_macro)] + +extern crate proc_macro; + +use proc_macro::TokenStream; + +#[proc_macro_attribute] +pub fn foo(_attr: TokenStream, input: TokenStream) -> TokenStream { + input.into_iter().collect() +} From 6375b77ebb640001e9d076eec8601d926d2543f7 Mon Sep 17 00:00:00 2001 From: topecongiro Date: Thu, 27 Jul 2017 13:37:35 +0900 Subject: [PATCH 035/213] Add Span to ast::WhereClause --- src/libsyntax/ast.rs | 2 ++ src/libsyntax/fold.rs | 5 +++-- src/libsyntax/parse/mod.rs | 1 + src/libsyntax/parse/parser.rs | 4 ++++ src/libsyntax/print/pprust.rs | 2 ++ src/libsyntax_ext/deriving/generic/ty.rs | 1 + 6 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index f7d9d532062a..ff9266fb0cba 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -321,6 +321,7 @@ impl Default for Generics { where_clause: WhereClause { id: DUMMY_NODE_ID, predicates: Vec::new(), + span: DUMMY_SP, }, span: DUMMY_SP, } @@ -332,6 +333,7 @@ impl Default for Generics { pub struct WhereClause { pub id: NodeId, pub predicates: Vec, + pub span: Span, } /// A single predicate in a `where` clause diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs index eaec1eef1722..714f02969ec4 100644 --- a/src/libsyntax/fold.rs +++ b/src/libsyntax/fold.rs @@ -736,14 +736,15 @@ pub fn noop_fold_generics(Generics {ty_params, lifetimes, where_claus } pub fn noop_fold_where_clause( - WhereClause {id, predicates}: WhereClause, + WhereClause {id, predicates, span}: WhereClause, fld: &mut T) -> WhereClause { WhereClause { id: fld.new_id(id), predicates: predicates.move_map(|predicate| { fld.fold_where_predicate(predicate) - }) + }), + span: span, } } diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index bd9a621c00c0..3c44ca7f3322 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -885,6 +885,7 @@ mod tests { where_clause: 
ast::WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }, span: syntax_pos::DUMMY_SP, }, diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 2cd84d202ffc..9fb4f4813e95 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -4373,6 +4373,7 @@ impl<'a> Parser<'a> { where_clause: WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }, span: span_lo.to(self.prev_span), }) @@ -4440,11 +4441,13 @@ impl<'a> Parser<'a> { let mut where_clause = WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }; if !self.eat_keyword(keywords::Where) { return Ok(where_clause); } + let lo = self.prev_span; // This is a temporary future proofing. // @@ -4522,6 +4525,7 @@ impl<'a> Parser<'a> { } } + where_clause.span = lo.to(self.prev_span); Ok(where_clause) } diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs index b052b2cdbbb5..e9d11e73837a 100644 --- a/src/libsyntax/print/pprust.rs +++ b/src/libsyntax/print/pprust.rs @@ -1041,6 +1041,7 @@ impl<'a> State<'a> { where_clause: ast::WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }, span: syntax_pos::DUMMY_SP, }; @@ -2983,6 +2984,7 @@ impl<'a> State<'a> { where_clause: ast::WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: syntax_pos::DUMMY_SP, }, span: syntax_pos::DUMMY_SP, }; diff --git a/src/libsyntax_ext/deriving/generic/ty.rs b/src/libsyntax_ext/deriving/generic/ty.rs index 9c89f99cbb5b..f5ac1743920c 100644 --- a/src/libsyntax_ext/deriving/generic/ty.rs +++ b/src/libsyntax_ext/deriving/generic/ty.rs @@ -216,6 +216,7 @@ fn mk_generics(lifetimes: Vec, ty_params: Vec, s where_clause: ast::WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), + span: span, }, span: span, } From 4886ec86651a5eaae1ddc834a941842904a5db61 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Wed, 
12 Jul 2017 09:50:05 -0700 Subject: [PATCH 036/213] syntax: Capture a `TokenStream` when parsing items This is then later used by `proc_macro` to generate a new `proc_macro::TokenTree` which preserves span information. Unfortunately this isn't a bullet-proof approach as it doesn't handle the case when there's still other attributes on the item, especially inner attributes. Despite this the intention here is to solve the primary use case for procedural attributes, attached to functions as outer attributes, likely bare. In this situation we should be able to now yield a lossless stream of tokens to preserve span information. --- src/libproc_macro/lib.rs | 63 ++++++++- src/libsyntax/ast.rs | 7 + src/libsyntax/ext/placeholders.rs | 2 + src/libsyntax/fold.rs | 13 +- src/libsyntax/parse/mod.rs | 9 +- src/libsyntax/parse/parser.rs | 130 +++++++++++++++++- src/libsyntax_ext/deriving/generic/mod.rs | 2 + .../proc-macro/attribute-with-error.rs | 34 ++++- .../proc-macro/attributes-included.rs | 30 ++++ .../auxiliary/attributes-included.rs | 130 ++++++++++++++++++ 10 files changed, 399 insertions(+), 21 deletions(-) create mode 100644 src/test/compile-fail-fulldeps/proc-macro/attributes-included.rs create mode 100644 src/test/compile-fail-fulldeps/proc-macro/auxiliary/attributes-included.rs diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs index 0f0d4138062f..1bffffd6c9e7 100644 --- a/src/libproc_macro/lib.rs +++ b/src/libproc_macro/lib.rs @@ -510,15 +510,38 @@ impl TokenTree { Literal(..) | DocComment(..) => TokenNode::Literal(self::Literal(token)), Interpolated(ref nt) => { - let mut node = None; - if let Nonterminal::NtItem(ref item) = nt.0 { - if let Some(ref tokens) = item.tokens { - node = Some(TokenNode::Group(Delimiter::None, - TokenStream(tokens.clone()))); + // An `Interpolated` token means that we have a `Nonterminal` + // which is often a parsed AST item. At this point we now need + // to convert the parsed AST to an actual token stream, e.g. 
+ // un-parse it basically. + // + // Unfortunately there's not really a great way to do that in a + // guaranteed lossless fashion right now. The fallback here is + // to just stringify the AST node and reparse it, but this loses + // all span information. + // + // As a result, some AST nodes are annotated with the token + // stream they came from. Attempt to extract these lossless + // token streams before we fall back to the stringification. + let mut tokens = None; + + match nt.0 { + Nonterminal::NtItem(ref item) => { + tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span); } + Nonterminal::NtTraitItem(ref item) => { + tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span); + } + Nonterminal::NtImplItem(ref item) => { + tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span); + } + _ => {} } - node.unwrap_or_else(|| { + tokens.map(|tokens| { + TokenNode::Group(Delimiter::None, + TokenStream(tokens.clone())) + }).unwrap_or_else(|| { __internal::with_sess(|(sess, _)| { TokenNode::Group(Delimiter::None, TokenStream(nt.1.force(|| { // FIXME(jseyfried): Avoid this pretty-print + reparse hack @@ -592,6 +615,34 @@ impl TokenTree { } } +fn prepend_attrs(attrs: &[ast::Attribute], + tokens: Option<&tokenstream::TokenStream>, + span: syntax_pos::Span) + -> Option +{ + let tokens = match tokens { + Some(tokens) => tokens, + None => return None, + }; + if attrs.len() == 0 { + return Some(tokens.clone()) + } + let mut builder = tokenstream::TokenStreamBuilder::new(); + for attr in attrs { + assert_eq!(attr.style, ast::AttrStyle::Outer, + "inner attributes should prevent cached tokens from existing"); + let stream = __internal::with_sess(|(sess, _)| { + // FIXME: Avoid this pretty-print + reparse hack as bove + let name = "".to_owned(); + let source = pprust::attr_to_string(attr); + parse_stream_from_source_str(name, source, sess, Some(span)) + }); + builder.push(stream); + } + builder.push(tokens.clone()); + Some(builder.build()) +} + /// 
Permanently unstable internal implementation details of this crate. This /// should not be used. /// diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index bd26ab5bd35a..fb7915415245 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -1149,6 +1149,8 @@ pub struct TraitItem { pub attrs: Vec, pub node: TraitItemKind, pub span: Span, + /// See `Item::tokens` for what this is + pub tokens: Option, } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] @@ -1168,6 +1170,8 @@ pub struct ImplItem { pub attrs: Vec, pub node: ImplItemKind, pub span: Span, + /// See `Item::tokens` for what this is + pub tokens: Option, } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] @@ -1817,6 +1821,9 @@ pub struct Item { /// available for all items, although over time more and more items should /// have this be `Some`. Right now this is primarily used for procedural /// macros, notably custom attributes. + /// + /// Note that the tokens here do not include the outer attributes, but will + /// include inner attributes. 
pub tokens: Option, } diff --git a/src/libsyntax/ext/placeholders.rs b/src/libsyntax/ext/placeholders.rs index 9bea641b0336..e3377c1d8dee 100644 --- a/src/libsyntax/ext/placeholders.rs +++ b/src/libsyntax/ext/placeholders.rs @@ -51,11 +51,13 @@ pub fn placeholder(kind: ExpansionKind, id: ast::NodeId) -> Expansion { ExpansionKind::TraitItems => Expansion::TraitItems(SmallVector::one(ast::TraitItem { id: id, span: span, ident: ident, attrs: attrs, node: ast::TraitItemKind::Macro(mac_placeholder()), + tokens: None, })), ExpansionKind::ImplItems => Expansion::ImplItems(SmallVector::one(ast::ImplItem { id: id, span: span, ident: ident, vis: vis, attrs: attrs, node: ast::ImplItemKind::Macro(mac_placeholder()), defaultness: ast::Defaultness::Final, + tokens: None, })), ExpansionKind::Pat => Expansion::Pat(P(ast::Pat { id: id, span: span, node: ast::PatKind::Mac(mac_placeholder()), diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs index 71802d0aa284..279f63d13a4f 100644 --- a/src/libsyntax/fold.rs +++ b/src/libsyntax/fold.rs @@ -957,7 +957,8 @@ pub fn noop_fold_trait_item(i: TraitItem, folder: &mut T) TraitItemKind::Macro(folder.fold_mac(mac)) } }, - span: folder.new_span(i.span) + span: folder.new_span(i.span), + tokens: i.tokens, }) } @@ -980,7 +981,8 @@ pub fn noop_fold_impl_item(i: ImplItem, folder: &mut T) ast::ImplItemKind::Type(ty) => ast::ImplItemKind::Type(folder.fold_ty(ty)), ast::ImplItemKind::Macro(mac) => ast::ImplItemKind::Macro(folder.fold_mac(mac)) }, - span: folder.new_span(i.span) + span: folder.new_span(i.span), + tokens: i.tokens, }) } @@ -1042,9 +1044,10 @@ pub fn noop_fold_item_simple(Item {id, ident, attrs, node, vis, span, attrs: fold_attrs(attrs, folder), node: folder.fold_item_kind(node), span: folder.new_span(span), - tokens: tokens.map(|tokens| { - folder.fold_tts(tokens.into()).into() - }), + + // FIXME: if this is replaced with a call to `folder.fold_tts` it causes + // an ICE during resolve... odd! 
+ tokens: tokens, } } diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index bd9a621c00c0..45e0b8404cc7 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -843,11 +843,18 @@ mod tests { // check the contents of the tt manually: #[test] fn parse_fundecl () { // this test depends on the intern order of "fn" and "i32" - assert_eq!(string_to_item("fn a (b : i32) { b; }".to_string()), + let item = string_to_item("fn a (b : i32) { b; }".to_string()).map(|m| { + m.map(|mut m| { + m.tokens = None; + m + }) + }); + assert_eq!(item, Some( P(ast::Item{ident:Ident::from_str("a"), attrs:Vec::new(), id: ast::DUMMY_NODE_ID, + tokens: None, node: ast::ItemKind::Fn(P(ast::FnDecl { inputs: vec![ast::Arg{ ty: P(ast::Ty{id: ast::DUMMY_NODE_ID, diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 4f8d85a8da47..1a10aa9d621b 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -216,6 +216,30 @@ struct TokenCursorFrame { open_delim: bool, tree_cursor: tokenstream::Cursor, close_delim: bool, + last_token: LastToken, +} + +/// This is used in `TokenCursorFrame` above to track tokens that are consumed +/// by the parser, and then that's transitively used to record the tokens that +/// each parse AST item is created with. +/// +/// Right now this has two states, either collecting tokens or not collecting +/// tokens. If we're collecting tokens we just save everything off into a local +/// `Vec`. This should eventually though likely save tokens from the original +/// token stream and just use slicing of token streams to avoid creation of a +/// whole new vector. +/// +/// The second state is where we're passively not recording tokens, but the last +/// token is still tracked for when we want to start recording tokens. This +/// "last token" means that when we start recording tokens we'll want to ensure +/// that this, the first token, is included in the output. 
+/// +/// You can find some more example usage of this in the `collect_tokens` method +/// on the parser. +#[derive(Clone)] +enum LastToken { + Collecting(Vec), + Was(Option), } impl TokenCursorFrame { @@ -226,6 +250,7 @@ impl TokenCursorFrame { open_delim: delimited.delim == token::NoDelim, tree_cursor: delimited.stream().into_trees(), close_delim: delimited.delim == token::NoDelim, + last_token: LastToken::Was(None), } } } @@ -250,6 +275,11 @@ impl TokenCursor { return TokenAndSpan { tok: token::Eof, sp: syntax_pos::DUMMY_SP } }; + match self.frame.last_token { + LastToken::Collecting(ref mut v) => v.push(tree.clone()), + LastToken::Was(ref mut t) => *t = Some(tree.clone()), + } + match tree { TokenTree::Token(sp, tok) => return TokenAndSpan { tok: tok, sp: sp }, TokenTree::Delimited(sp, ref delimited) => { @@ -1209,7 +1239,20 @@ impl<'a> Parser<'a> { /// Parse the items in a trait declaration pub fn parse_trait_item(&mut self, at_end: &mut bool) -> PResult<'a, TraitItem> { maybe_whole!(self, NtTraitItem, |x| x); - let mut attrs = self.parse_outer_attributes()?; + let attrs = self.parse_outer_attributes()?; + let (mut item, tokens) = self.collect_tokens(|this| { + this.parse_trait_item_(at_end, attrs) + })?; + // See `parse_item` for why this clause is here. + if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) { + item.tokens = Some(tokens); + } + Ok(item) + } + + fn parse_trait_item_(&mut self, + at_end: &mut bool, + mut attrs: Vec) -> PResult<'a, TraitItem> { let lo = self.span; let (name, node) = if self.eat_keyword(keywords::Type) { @@ -1304,6 +1347,7 @@ impl<'a> Parser<'a> { attrs: attrs, node: node, span: lo.to(self.prev_span), + tokens: None, }) } @@ -4653,7 +4697,7 @@ impl<'a> Parser<'a> { node: node, vis: vis, span: span, - tokens: None, // TODO: fill this in + tokens: None, }) } @@ -4709,8 +4753,21 @@ impl<'a> Parser<'a> { /// Parse an impl item. 
pub fn parse_impl_item(&mut self, at_end: &mut bool) -> PResult<'a, ImplItem> { maybe_whole!(self, NtImplItem, |x| x); + let attrs = self.parse_outer_attributes()?; + let (mut item, tokens) = self.collect_tokens(|this| { + this.parse_impl_item_(at_end, attrs) + })?; - let mut attrs = self.parse_outer_attributes()?; + // See `parse_item` for why this clause is here. + if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) { + item.tokens = Some(tokens); + } + Ok(item) + } + + fn parse_impl_item_(&mut self, + at_end: &mut bool, + mut attrs: Vec) -> PResult<'a, ImplItem> { let lo = self.span; let vis = self.parse_visibility(false)?; let defaultness = self.parse_defaultness()?; @@ -4742,7 +4799,8 @@ impl<'a> Parser<'a> { vis: vis, defaultness: defaultness, attrs: attrs, - node: node + node: node, + tokens: None, }) } @@ -6018,9 +6076,71 @@ impl<'a> Parser<'a> { Ok(None) } + fn collect_tokens(&mut self, f: F) -> PResult<'a, (R, TokenStream)> + where F: FnOnce(&mut Self) -> PResult<'a, R> + { + // Record all tokens we parse when parsing this item. + let mut tokens = Vec::new(); + match self.token_cursor.frame.last_token { + LastToken::Collecting(_) => { + panic!("cannot collect tokens recursively yet") + } + LastToken::Was(ref mut last) => tokens.extend(last.take()), + } + self.token_cursor.frame.last_token = LastToken::Collecting(tokens); + let prev = self.token_cursor.stack.len(); + let ret = f(self); + let last_token = if self.token_cursor.stack.len() == prev { + &mut self.token_cursor.frame.last_token + } else { + &mut self.token_cursor.stack[prev].last_token + }; + let mut tokens = match *last_token { + LastToken::Collecting(ref mut v) => mem::replace(v, Vec::new()), + LastToken::Was(_) => panic!("our vector went away?"), + }; + + // If we're not at EOF our current token wasn't actually consumed by + // `f`, but it'll still be in our list that we pulled out. In that case + // put it back. 
+ if self.token == token::Eof { + *last_token = LastToken::Was(None); + } else { + *last_token = LastToken::Was(tokens.pop()); + } + + Ok((ret?, tokens.into_iter().collect())) + } + pub fn parse_item(&mut self) -> PResult<'a, Option>> { let attrs = self.parse_outer_attributes()?; - self.parse_item_(attrs, true, false) + + let (ret, tokens) = self.collect_tokens(|this| { + this.parse_item_(attrs, true, false) + })?; + + // Once we've parsed an item and recorded the tokens we got while + // parsing we may want to store `tokens` into the item we're about to + // return. Note, though, that we specifically didn't capture tokens + // related to outer attributes. The `tokens` field here may later be + // used with procedural macros to convert this item back into a token + // stream, but during expansion we may be removing attributes as we go + // along. + // + // If we've got inner attributes then the `tokens` we've got above holds + // these inner attributes. If an inner attribute is expanded we won't + // actually remove it from the token stream, so we'll just keep yielding + // it (bad!). To work around this case for now we just avoid recording + // `tokens` if we detect any inner attributes. This should help keep + // expansion correct, but we should fix this bug one day! 
+ Ok(ret.map(|item| { + item.map(|mut i| { + if !i.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) { + i.tokens = Some(tokens); + } + i + }) + })) } fn parse_path_list_items(&mut self) -> PResult<'a, Vec> { diff --git a/src/libsyntax_ext/deriving/generic/mod.rs b/src/libsyntax_ext/deriving/generic/mod.rs index 4acd65bbf865..3cbc7938bde0 100644 --- a/src/libsyntax_ext/deriving/generic/mod.rs +++ b/src/libsyntax_ext/deriving/generic/mod.rs @@ -504,6 +504,7 @@ impl<'a> TraitDef<'a> { defaultness: ast::Defaultness::Final, attrs: Vec::new(), node: ast::ImplItemKind::Type(type_def.to_ty(cx, self.span, type_ident, generics)), + tokens: None, } }); @@ -930,6 +931,7 @@ impl<'a> MethodDef<'a> { decl: fn_decl, }, body_block), + tokens: None, } } diff --git a/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs b/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs index a74343cda864..65f4b6350c4e 100644 --- a/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs +++ b/src/test/compile-fail-fulldeps/proc-macro/attribute-with-error.rs @@ -14,12 +14,38 @@ extern crate attribute_with_error; -#[attribute_with_error::foo] -fn test() { +use attribute_with_error::foo; + +#[foo] +fn test1() { let a: i32 = "foo"; //~^ ERROR: mismatched types } -fn main() { - test(); +fn test2() { + #![foo] + + // FIXME: should have a type error here and assert it works but it doesn't +} + +trait A { + // FIXME: should have a #[foo] attribute here and assert that it works + fn foo(&self) { + let a: i32 = "foo"; + //~^ ERROR: mismatched types + } +} + +struct B; + +impl A for B { + #[foo] + fn foo(&self) { + let a: i32 = "foo"; + //~^ ERROR: mismatched types + } +} + +#[foo] +fn main() { } diff --git a/src/test/compile-fail-fulldeps/proc-macro/attributes-included.rs b/src/test/compile-fail-fulldeps/proc-macro/attributes-included.rs new file mode 100644 index 000000000000..508f8dac5711 --- /dev/null +++ 
b/src/test/compile-fail-fulldeps/proc-macro/attributes-included.rs @@ -0,0 +1,30 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// aux-build:attributes-included.rs + +#![feature(proc_macro, rustc_attrs)] + +extern crate attributes_included; + +#[attributes_included::bar] +#[inline] +/// doc +#[attributes_included::foo] +#[inline] +/// doc +fn foo() { + let a: i32 = "foo"; //~ WARN: unused variable +} + +#[rustc_error] +fn main() { //~ ERROR: compilation successful + foo() +} diff --git a/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attributes-included.rs b/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attributes-included.rs new file mode 100644 index 000000000000..a1efbb88a4d2 --- /dev/null +++ b/src/test/compile-fail-fulldeps/proc-macro/auxiliary/attributes-included.rs @@ -0,0 +1,130 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// force-host +// no-prefer-dynamic + +#![feature(proc_macro)] +#![crate_type = "proc-macro"] + +extern crate proc_macro; + +use proc_macro::{TokenStream, TokenTree, TokenNode, Delimiter, Literal}; + +#[proc_macro_attribute] +pub fn foo(attr: TokenStream, input: TokenStream) -> TokenStream { + assert!(attr.is_empty()); + let input = input.into_iter().collect::>(); + { + let mut cursor = &input[..]; + assert_inline(&mut cursor); + assert_doc(&mut cursor); + assert_inline(&mut cursor); + assert_doc(&mut cursor); + assert_foo(&mut cursor); + assert!(cursor.is_empty()); + } + fold_stream(input.into_iter().collect()) +} + +#[proc_macro_attribute] +pub fn bar(attr: TokenStream, input: TokenStream) -> TokenStream { + assert!(attr.is_empty()); + let input = input.into_iter().collect::>(); + { + let mut cursor = &input[..]; + assert_inline(&mut cursor); + assert_doc(&mut cursor); + assert_invoc(&mut cursor); + assert_inline(&mut cursor); + assert_doc(&mut cursor); + assert_foo(&mut cursor); + assert!(cursor.is_empty()); + } + input.into_iter().collect() +} + +fn assert_inline(slice: &mut &[TokenTree]) { + match slice[0].kind { + TokenNode::Op('#', _) => {} + _ => panic!("expected '#' char"), + } + match slice[1].kind { + TokenNode::Group(Delimiter::Bracket, _) => {} + _ => panic!("expected brackets"), + } + *slice = &slice[2..]; +} + +fn assert_doc(slice: &mut &[TokenTree]) { + match slice[0].kind { + TokenNode::Literal(_) => {} + _ => panic!("expected literal doc comment got other"), + } + *slice = &slice[1..]; +} + +fn assert_invoc(slice: &mut &[TokenTree]) { + match slice[0].kind { + TokenNode::Op('#', _) => {} + _ => panic!("expected '#' char"), + } + match slice[1].kind { + TokenNode::Group(Delimiter::Bracket, _) => {} + _ => panic!("expected brackets"), + } + *slice = &slice[2..]; +} + +fn assert_foo(slice: &mut &[TokenTree]) { + match slice[0].kind { + TokenNode::Term(ref name) => assert_eq!(name.as_str(), "fn"), + _ => panic!("expected fn"), + } + match 
slice[1].kind { + TokenNode::Term(ref name) => assert_eq!(name.as_str(), "foo"), + _ => panic!("expected foo"), + } + match slice[2].kind { + TokenNode::Group(Delimiter::Parenthesis, ref s) => assert!(s.is_empty()), + _ => panic!("expected parens"), + } + match slice[3].kind { + TokenNode::Group(Delimiter::Brace, _) => {} + _ => panic!("expected braces"), + } + *slice = &slice[4..]; +} + +fn fold_stream(input: TokenStream) -> TokenStream { + input.into_iter().map(fold_tree).collect() +} + +fn fold_tree(input: TokenTree) -> TokenTree { + TokenTree { + span: input.span, + kind: fold_node(input.kind), + } +} + +fn fold_node(input: TokenNode) -> TokenNode { + match input { + TokenNode::Group(a, b) => TokenNode::Group(a, fold_stream(b)), + TokenNode::Op(a, b) => TokenNode::Op(a, b), + TokenNode::Term(a) => TokenNode::Term(a), + TokenNode::Literal(a) => { + if a.to_string() != "\"foo\"" { + TokenNode::Literal(a) + } else { + TokenNode::Literal(Literal::integer(3)) + } + } + } +} From 069a1b3c8f514b04251788108ecebfa672e942ae Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 28 Jul 2017 12:17:52 -0700 Subject: [PATCH 037/213] rustbuild: Tweak how we cross-compile LLVM In preparation for upgrading to LLVM 5.0 it looks like we need to tweak how we cross compile LLVM slightly. It's using `CMAKE_SYSTEM_NAME` to infer whether to build libFuzzer which only works on some platforms, and then once we configure that it needs to apparently reach into the host build area to try to compile `llvm-config` as well. Once these are both configured, though, it looks like we can successfully cross-compile LLVM. 
--- src/bootstrap/native.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index f0dfd857ab61..bb80674c8877 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -154,6 +154,14 @@ impl Step for Llvm { let host = build.llvm_out(build.build).join("bin/llvm-tblgen"); cfg.define("CMAKE_CROSSCOMPILING", "True") .define("LLVM_TABLEGEN", &host); + + if target.contains("netbsd") { + cfg.define("CMAKE_SYSTEM_NAME", "NetBSD"); + } else if target.contains("freebsd") { + cfg.define("CMAKE_SYSTEM_NAME", "FreeBSD"); + } + + cfg.define("LLVM_NATIVE_BUILD", build.llvm_out(build.build).join("build")); } let sanitize_cc = |cc: &Path| { From 122fd188aefa4d275e23880fc9836f664cd08582 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 28 Jul 2017 14:26:02 -0700 Subject: [PATCH 038/213] rustbuild: Update cross-compilers for FreeBSD When working through bugs for the LLVM 5.0 upgrade it looks like the FreeBSD cross compilers we're currently using are unable to build LLVM, failing with references to the function `std::to_string` claiming it doesn't exist. I don't actually know what this function is, but assuming that it was added in a more recent version of a C++ standard I've updated the gcc versions for the toolchains we're using. This made the error go away! 
--- src/ci/docker/dist-i686-freebsd/build-toolchain.sh | 4 ++-- src/ci/docker/dist-x86_64-freebsd/build-toolchain.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ci/docker/dist-i686-freebsd/build-toolchain.sh b/src/ci/docker/dist-i686-freebsd/build-toolchain.sh index 5642e6fc937f..8343327c33bf 100755 --- a/src/ci/docker/dist-i686-freebsd/build-toolchain.sh +++ b/src/ci/docker/dist-i686-freebsd/build-toolchain.sh @@ -13,7 +13,7 @@ set -ex ARCH=$1 BINUTILS=2.25.1 -GCC=5.3.0 +GCC=6.4.0 hide_output() { set +x @@ -86,7 +86,7 @@ rm -rf freebsd # Finally, download and build gcc to target FreeBSD mkdir gcc cd gcc -curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf - +curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.gz | tar xzf - cd gcc-$GCC ./contrib/download_prerequisites diff --git a/src/ci/docker/dist-x86_64-freebsd/build-toolchain.sh b/src/ci/docker/dist-x86_64-freebsd/build-toolchain.sh index 5642e6fc937f..8343327c33bf 100755 --- a/src/ci/docker/dist-x86_64-freebsd/build-toolchain.sh +++ b/src/ci/docker/dist-x86_64-freebsd/build-toolchain.sh @@ -13,7 +13,7 @@ set -ex ARCH=$1 BINUTILS=2.25.1 -GCC=5.3.0 +GCC=6.4.0 hide_output() { set +x @@ -86,7 +86,7 @@ rm -rf freebsd # Finally, download and build gcc to target FreeBSD mkdir gcc cd gcc -curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf - +curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.gz | tar xzf - cd gcc-$GCC ./contrib/download_prerequisites From 8e7849e766730f9e210330485386731cac40d346 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 28 Jul 2017 17:52:44 -0700 Subject: [PATCH 039/213] rustbuild: Use Cargo's "target runner" This commit leverages a relatively new feature in Cargo to execute cross-compiled tests, the `target.$target.runner` configuration. 
We configure it through environment variables in rustbuild and this avoids the need for us to locate and run tests after-the-fact, instead relying on Cargo to do all that execution for us. --- src/bootstrap/check.rs | 80 +++++++----------------------------------- src/libstd/process.rs | 15 ++++++-- 2 files changed, 26 insertions(+), 69 deletions(-) diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index b04e4de77445..c65f5a9fb48b 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -1050,11 +1050,8 @@ impl Step for Crate { dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target))); cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap()); - if target.contains("emscripten") || build.remote_tested(target) { - cargo.arg("--no-run"); - } - cargo.arg("--"); + cargo.args(&build.flags.cmd.test_args()); if build.config.quiet_tests { cargo.arg("--quiet"); @@ -1063,75 +1060,24 @@ impl Step for Crate { let _time = util::timeit(); if target.contains("emscripten") { - build.run(&mut cargo); - krate_emscripten(build, compiler, target, mode); + cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), + build.config.nodejs.as_ref().expect("nodejs not configured")); } else if build.remote_tested(target) { - build.run(&mut cargo); - krate_remote(builder, compiler, target, mode); - } else { - cargo.args(&build.flags.cmd.test_args()); - try_run(build, &mut cargo); + cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), + format!("{} run", + builder.tool_exe(Tool::RemoteTestClient).display())); } + try_run(build, &mut cargo); } } -fn krate_emscripten(build: &Build, - compiler: Compiler, - target: Interned, - mode: Mode) { - let out_dir = build.cargo_out(compiler, mode, target); - let tests = find_tests(&out_dir.join("deps"), target); - - let nodejs = build.config.nodejs.as_ref().expect("nodejs not configured"); - for test in tests { - println!("running {}", test.display()); - let mut cmd = Command::new(nodejs); - 
cmd.arg(&test); - if build.config.quiet_tests { - cmd.arg("--quiet"); +fn envify(s: &str) -> String { + s.chars().map(|c| { + match c { + '-' => '_', + c => c, } - try_run(build, &mut cmd); - } -} - -fn krate_remote(builder: &Builder, - compiler: Compiler, - target: Interned, - mode: Mode) { - let build = builder.build; - let out_dir = build.cargo_out(compiler, mode, target); - let tests = find_tests(&out_dir.join("deps"), target); - - let tool = builder.tool_exe(Tool::RemoteTestClient); - for test in tests { - let mut cmd = Command::new(&tool); - cmd.arg("run") - .arg(&test); - if build.config.quiet_tests { - cmd.arg("--quiet"); - } - cmd.args(&build.flags.cmd.test_args()); - try_run(build, &mut cmd); - } -} - -fn find_tests(dir: &Path, target: Interned) -> Vec { - let mut dst = Vec::new(); - for e in t!(dir.read_dir()).map(|e| t!(e)) { - let file_type = t!(e.file_type()); - if !file_type.is_file() { - continue - } - let filename = e.file_name().into_string().unwrap(); - if (target.contains("windows") && filename.ends_with(".exe")) || - (!target.contains("windows") && !filename.contains(".")) || - (target.contains("emscripten") && - filename.ends_with(".js") && - !filename.ends_with(".asm.js")) { - dst.push(e.path()); - } - } - dst + }).flat_map(|c| c.to_uppercase()).collect() } /// Some test suites are run inside emulators or on remote devices, and most diff --git a/src/libstd/process.rs b/src/libstd/process.rs index 31809e382398..a872e7eee060 100644 --- a/src/libstd/process.rs +++ b/src/libstd/process.rs @@ -1417,8 +1417,19 @@ mod tests { let output = String::from_utf8(result.stdout).unwrap(); for (ref k, ref v) in env::vars() { - // don't check android RANDOM variables - if cfg!(target_os = "android") && *k == "RANDOM" { + // Don't check android RANDOM variable which seems to change + // whenever the shell runs, and our `env_cmd` is indeed running a + // shell which means it'll get a different RANDOM than we probably + // have. 
+ // + // Also skip env vars with `-` in the name on android because, well, + // I'm not sure. It appears though that the `set` command above does + // not print env vars with `-` in the name, so we just skip them + // here as we won't find them in the output. Note that most env vars + // use `_` instead of `-`, but our build system sets a few env vars + // with `-` in the name. + if cfg!(target_os = "android") && + (*k == "RANDOM" || k.contains("-")) { continue } From cb93cc62999eda4145a878f26e3fe912b2dcbb22 Mon Sep 17 00:00:00 2001 From: gaurikholkar Date: Sat, 29 Jul 2017 17:37:05 +0530 Subject: [PATCH 040/213] changing E0623 error message --- src/librustc/infer/error_reporting/anon_anon_conflict.rs | 4 ++-- .../ui/lifetime-errors/ex3-both-anon-regions-2.stderr | 4 ++-- .../ui/lifetime-errors/ex3-both-anon-regions-3.stderr | 4 ++-- .../ui/lifetime-errors/ex3-both-anon-regions-4.stderr | 8 ++++---- src/test/ui/lifetime-errors/ex3-both-anon-regions.stderr | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/librustc/infer/error_reporting/anon_anon_conflict.rs b/src/librustc/infer/error_reporting/anon_anon_conflict.rs index 1017f2bd0e6e..2e9109688181 100644 --- a/src/librustc/infer/error_reporting/anon_anon_conflict.rs +++ b/src/librustc/infer/error_reporting/anon_anon_conflict.rs @@ -77,10 +77,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { struct_span_err!(self.tcx.sess, span, E0623, "lifetime mismatch") .span_label(ty1.span, - format!("these references must have the same lifetime")) + format!("these references are not declared with the same lifetime...")) .span_label(ty2.span, format!("")) .span_label(span, - format!("data{}flows{}here", span_label_var1, span_label_var2)) + format!("...but data{}flows{}here", span_label_var1, span_label_var2)) .emit(); } else { return false; diff --git a/src/test/ui/lifetime-errors/ex3-both-anon-regions-2.stderr b/src/test/ui/lifetime-errors/ex3-both-anon-regions-2.stderr index 
8dd906afdc4e..4c878f3c0dc0 100644 --- a/src/test/ui/lifetime-errors/ex3-both-anon-regions-2.stderr +++ b/src/test/ui/lifetime-errors/ex3-both-anon-regions-2.stderr @@ -2,9 +2,9 @@ error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions-2.rs:12:9 | 11 | fn foo((v, w): (&u8, &u8), x: &u8) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 12 | v = x; - | ^ data from `x` flows here + | ^ ...but data from `x` flows here error: aborting due to previous error diff --git a/src/test/ui/lifetime-errors/ex3-both-anon-regions-3.stderr b/src/test/ui/lifetime-errors/ex3-both-anon-regions-3.stderr index 66c3ca45499b..08506b8befa0 100644 --- a/src/test/ui/lifetime-errors/ex3-both-anon-regions-3.stderr +++ b/src/test/ui/lifetime-errors/ex3-both-anon-regions-3.stderr @@ -2,9 +2,9 @@ error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions-3.rs:12:9 | 11 | fn foo((v, w): (&u8, &u8), (x, y): (&u8, &u8)) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 12 | v = x; - | ^ data flows here + | ^ ...but data flows here error: aborting due to previous error diff --git a/src/test/ui/lifetime-errors/ex3-both-anon-regions-4.stderr b/src/test/ui/lifetime-errors/ex3-both-anon-regions-4.stderr index b969797b3744..9c2630fc8114 100644 --- a/src/test/ui/lifetime-errors/ex3-both-anon-regions-4.stderr +++ b/src/test/ui/lifetime-errors/ex3-both-anon-regions-4.stderr @@ -4,17 +4,17 @@ error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions-4.rs:12:13 | 11 | fn foo(z: &mut Vec<(&u8,&u8)>, (x, y): (&u8, &u8)) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 
12 | z.push((x,y)); - | ^ data flows into `z` here + | ^ ...but data flows into `z` here error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions-4.rs:12:15 | 11 | fn foo(z: &mut Vec<(&u8,&u8)>, (x, y): (&u8, &u8)) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 12 | z.push((x,y)); - | ^ data flows into `z` here + | ^ ...but data flows into `z` here error: aborting due to 3 previous errors diff --git a/src/test/ui/lifetime-errors/ex3-both-anon-regions.stderr b/src/test/ui/lifetime-errors/ex3-both-anon-regions.stderr index e38e2ef07ad8..a183d1fffc0b 100644 --- a/src/test/ui/lifetime-errors/ex3-both-anon-regions.stderr +++ b/src/test/ui/lifetime-errors/ex3-both-anon-regions.stderr @@ -2,9 +2,9 @@ error[E0623]: lifetime mismatch --> $DIR/ex3-both-anon-regions.rs:12:12 | 11 | fn foo(x: &mut Vec<&u8>, y: &u8) { - | --- --- these references must have the same lifetime + | --- --- these references are not declared with the same lifetime... 
12 | x.push(y); - | ^ data from `y` flows into `x` here + | ^ ...but data from `y` flows into `x` here error: aborting due to previous error From 5636d325ed50d859091b93abfed1c6d6fd8cff56 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sat, 29 Jul 2017 14:35:09 +0200 Subject: [PATCH 041/213] Update cargo version --- src/Cargo.lock | 13 +++++++++++-- src/tools/cargo | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/Cargo.lock b/src/Cargo.lock index 1df8bcf74cd8..c4ab56a95768 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -187,7 +187,7 @@ dependencies = [ "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)", "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", "fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -323,7 +323,7 @@ name = "crates-io" version = "0.11.0" dependencies = [ "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -436,6 +436,14 @@ dependencies = [ "backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "error-chain" +version = "0.11.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + 
"backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "error_index_generator" version = "0.0.0" @@ -2150,6 +2158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f" "checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" "checksum error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9435d864e017c3c6afeac1654189b06cdb491cf2ff73dbf0d73b0f292f42ff8" +"checksum error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)" = "38d3a55d9a7a456748f2a3912c0941a5d9a68006eb15b3c3c9836b8420dc102d" "checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922" "checksum flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)" = "36df0166e856739905cd3d7e0b210fe818592211a008862599845e012d8d304c" "checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344" diff --git a/src/tools/cargo b/src/tools/cargo index 88aa6423a164..305bc25d5e10 160000 --- a/src/tools/cargo +++ b/src/tools/cargo @@ -1 +1 @@ -Subproject commit 88aa6423a164774d09abc78a24e74e8e665f651b +Subproject commit 305bc25d5e105e84ffe261655b46cf74570f6e5b From 612081a78d136c7ad0b63dd3454ceb727d0e69c5 Mon Sep 17 00:00:00 2001 From: QuietMisdreavus Date: Thu, 27 Jul 2017 19:21:10 -0500 Subject: [PATCH 042/213] print associated types in traits "implementors" section --- src/librustdoc/html/render.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index e89bd7aae9bf..60735fbe73c6 100644 --- 
a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -2235,6 +2235,13 @@ fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, _ => false, }; fmt_impl_for_trait_page(&implementor.impl_, w, use_absolute)?; + for it in &implementor.impl_.items { + if let clean::TypedefItem(ref tydef, _) = it.inner { + write!(w, " ")?; + assoc_type(w, it, &vec![], Some(&tydef.type_), AssocItemLink::Anchor(None))?; + write!(w, ";")?; + } + } writeln!(w, "")?; } } From 80cf3f99f4a3377fd9b544d18017ef29b8713dfd Mon Sep 17 00:00:00 2001 From: Vadim Petrochenkov Date: Sat, 29 Jul 2017 17:19:57 +0300 Subject: [PATCH 043/213] Cleanup some remains of `hr_lifetime_in_assoc_type` compatibility lint --- src/librustc/ich/impls_ty.rs | 1 - src/librustc/infer/error_reporting/mod.rs | 35 +------- src/librustc/infer/higher_ranked/mod.rs | 56 ++----------- src/librustc/infer/mod.rs | 7 +- src/librustc/middle/resolve_lifetime.rs | 38 ++------- src/librustc/traits/project.rs | 41 +++------- src/librustc/ty/error.rs | 14 ++-- src/librustc/ty/mod.rs | 2 - src/librustc/ty/structural_impls.rs | 24 +++--- src/librustc/ty/sty.rs | 14 ---- src/librustc_typeck/astconv.rs | 80 +++++++++---------- src/librustc_typeck/check/mod.rs | 2 +- src/librustc_typeck/collect.rs | 2 - .../cache/project-fn-ret-contravariant.rs | 24 +++--- .../cache/project-fn-ret-invariant.rs | 46 +++++------ src/test/compile-fail/hr-subtype.rs | 3 - .../regions-fn-subtyping-return-static.stderr | 2 - 17 files changed, 113 insertions(+), 278 deletions(-) diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index 3e227872848e..42e4338ca307 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -368,7 +368,6 @@ for ty::RegionParameterDef { name, def_id, index, - issue_32330: _, pure_wrt_drop } = *self; diff --git a/src/librustc/infer/error_reporting/mod.rs b/src/librustc/infer/error_reporting/mod.rs index 77ec866dc808..8e8576b83e4e 100644 --- 
a/src/librustc/infer/error_reporting/mod.rs +++ b/src/librustc/infer/error_reporting/mod.rs @@ -66,8 +66,7 @@ use hir::map as hir_map; use hir::def_id::DefId; use middle::region; use traits::{ObligationCause, ObligationCauseCode}; -use ty::{self, TyCtxt, TypeFoldable}; -use ty::{Region, Issue32330}; +use ty::{self, Region, TyCtxt, TypeFoldable}; use ty::error::TypeError; use syntax::ast::DUMMY_NODE_ID; use syntax_pos::{Pos, Span}; @@ -713,35 +712,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.note_and_explain_type_err(diag, terr, span); } - pub fn note_issue_32330(&self, - diag: &mut DiagnosticBuilder<'tcx>, - terr: &TypeError<'tcx>) - { - debug!("note_issue_32330: terr={:?}", terr); - match *terr { - TypeError::RegionsInsufficientlyPolymorphic(_, _, Some(box Issue32330 { - fn_def_id, region_name - })) | - TypeError::RegionsOverlyPolymorphic(_, _, Some(box Issue32330 { - fn_def_id, region_name - })) => { - diag.note( - &format!("lifetime parameter `{0}` declared on fn `{1}` \ - appears only in the return type, \ - but here is required to be higher-ranked, \ - which means that `{0}` must appear in both \ - argument and return types", - region_name, - self.tcx.item_path_str(fn_def_id))); - diag.note( - &format!("this error is the result of a recent bug fix; \ - for more information, see issue #33685 \ - ")); - } - _ => {} - } - } - pub fn report_and_explain_type_error(&self, trace: TypeTrace<'tcx>, terr: &TypeError<'tcx>) @@ -761,7 +731,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } }; self.note_type_err(&mut diag, &trace.cause, None, Some(trace.values), terr); - self.note_issue_32330(&mut diag, terr); diag } @@ -934,7 +903,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { format!(" for lifetime parameter {}in trait containing associated type `{}`", br_string(br), type_name) } - infer::EarlyBoundRegion(_, name, _) => { + infer::EarlyBoundRegion(_, name) => { format!(" for lifetime parameter `{}`", name) } diff --git 
a/src/librustc/infer/higher_ranked/mod.rs b/src/librustc/infer/higher_ranked/mod.rs index 541a9978341f..9ecc8b0e66b9 100644 --- a/src/librustc/infer/higher_ranked/mod.rs +++ b/src/librustc/infer/higher_ranked/mod.rs @@ -13,9 +13,7 @@ use super::{CombinedSnapshot, InferCtxt, - LateBoundRegion, HigherRankedType, - RegionVariableOrigin, SubregionOrigin, SkolemizationMap}; use super::combine::CombineFields; @@ -29,15 +27,6 @@ use util::nodemap::{FxHashMap, FxHashSet}; pub struct HrMatchResult { pub value: U, - - /// Normally, when we do a higher-ranked match operation, we - /// expect all higher-ranked regions to be constrained as part of - /// the match operation. However, in the transition period for - /// #32330, it can happen that we sometimes have unconstrained - /// regions that get instantiated with fresh variables. In that - /// case, we collect the set of unconstrained bound regions here - /// and replace them with fresh variables. - pub unconstrained_regions: Vec, } impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { @@ -108,7 +97,6 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { /// that do not appear in `T`. If that happens, those regions are /// unconstrained, and this routine replaces them with `'static`. pub fn higher_ranked_match(&mut self, - span: Span, a_pair: &Binder<(T, U)>, b_match: &T, a_is_expected: bool) @@ -158,28 +146,16 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // be any region from the sets above, except for other members of // `skol_map`. There should always be a representative if things // are properly well-formed. 
- let mut unconstrained_regions = vec![]; let skol_representatives: FxHashMap<_, _> = skol_resolution_map .iter() - .map(|(&skol, &(br, ref regions))| { + .map(|(&skol, &(_, ref regions))| { let representative = regions.iter() .filter(|&&r| !skol_resolution_map.contains_key(r)) .cloned() .next() - .unwrap_or_else(|| { // [1] - unconstrained_regions.push(br); - self.infcx.next_region_var( - LateBoundRegion(span, br, HigherRankedType)) - }); - - // [1] There should always be a representative, - // unless the higher-ranked region did not appear - // in the values being matched. We should reject - // as ill-formed cases that can lead to this, but - // right now we sometimes issue warnings (see - // #32330). + .expect("no representative region"); (skol, representative) }) @@ -216,10 +192,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // We are now done with these skolemized variables. self.infcx.pop_skolemized(skol_map, snapshot); - Ok(HrMatchResult { - value: a_value, - unconstrained_regions, - }) + Ok(HrMatchResult { value: a_value }) }); } @@ -657,28 +630,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { skol_br, tainted_region); - let issue_32330 = if let &ty::ReVar(vid) = tainted_region { - match self.region_vars.var_origin(vid) { - RegionVariableOrigin::EarlyBoundRegion(_, _, issue_32330) => { - issue_32330.map(Box::new) - } - _ => None - } - } else { - None - }; - - if overly_polymorphic { + return Err(if overly_polymorphic { debug!("Overly polymorphic!"); - return Err(TypeError::RegionsOverlyPolymorphic(skol_br, - tainted_region, - issue_32330)); + TypeError::RegionsOverlyPolymorphic(skol_br, tainted_region) } else { debug!("Not as polymorphic!"); - return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br, - tainted_region, - issue_32330)); - } + TypeError::RegionsInsufficientlyPolymorphic(skol_br, tainted_region) + }) } } diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs index 7154ce9e38f5..bf79becfe4a1 100644 --- 
a/src/librustc/infer/mod.rs +++ b/src/librustc/infer/mod.rs @@ -299,7 +299,7 @@ pub enum RegionVariableOrigin { Coercion(Span), // Region variables created as the values for early-bound regions - EarlyBoundRegion(Span, ast::Name, Option), + EarlyBoundRegion(Span, ast::Name), // Region variables created for bound regions // in a function or method that is called @@ -989,7 +989,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span: Span, def: &ty::RegionParameterDef) -> ty::Region<'tcx> { - self.next_region_var(EarlyBoundRegion(span, def.name, def.issue_32330)) + self.next_region_var(EarlyBoundRegion(span, def.name)) } /// Create a type inference variable for the given @@ -1278,14 +1278,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { -> InferResult<'tcx, HrMatchResult>> { let match_pair = match_a.map_bound(|p| (p.projection_ty.trait_ref(self.tcx), p.ty)); - let span = cause.span; let trace = TypeTrace { cause, values: TraitRefs(ExpectedFound::new(true, match_pair.skip_binder().0, match_b)) }; let mut combine = self.combine_fields(trace, param_env); - let result = combine.higher_ranked_match(span, &match_pair, &match_b, true)?; + let result = combine.higher_ranked_match(&match_pair, &match_b, true)?; Ok(InferOk { value: result, obligations: combine.obligations }) } diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs index c4f785757cee..13efa94a5c94 100644 --- a/src/librustc/middle/resolve_lifetime.rs +++ b/src/librustc/middle/resolve_lifetime.rs @@ -153,10 +153,6 @@ pub struct NamedRegionMap { // (b) it DOES appear in the arguments. pub late_bound: NodeSet, - // Contains the node-ids for lifetimes that were (incorrectly) categorized - // as late-bound, until #32330 was fixed. - pub issue_32330: NodeMap, - // For each type and trait definition, maps type parameters // to the trait object lifetime defaults computed from them. 
pub object_lifetime_defaults: NodeMap>, @@ -261,7 +257,6 @@ pub fn krate(sess: &Session, let mut map = NamedRegionMap { defs: NodeMap(), late_bound: NodeSet(), - issue_32330: NodeMap(), object_lifetime_defaults: compute_object_lifetime_defaults(sess, hir_map), }; sess.track_errors(|| { @@ -303,7 +298,7 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { match item.node { hir::ItemFn(ref decl, _, _, _, ref generics, _) => { - self.visit_early_late(item.id, None, decl, generics, |this| { + self.visit_early_late(None, decl, generics, |this| { intravisit::walk_item(this, item); }); } @@ -355,7 +350,7 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) { match item.node { hir::ForeignItemFn(ref decl, _, ref generics) => { - self.visit_early_late(item.id, None, decl, generics, |this| { + self.visit_early_late(None, decl, generics, |this| { intravisit::walk_foreign_item(this, item); }) } @@ -406,7 +401,6 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { if let hir::TraitItemKind::Method(ref sig, _) = trait_item.node { self.visit_early_late( - trait_item.id, Some(self.hir_map.get_parent(trait_item.id)), &sig.decl, &sig.generics, |this| intravisit::walk_trait_item(this, trait_item)) @@ -418,7 +412,6 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node { self.visit_early_late( - impl_item.id, Some(self.hir_map.get_parent(impl_item.id)), &sig.decl, &sig.generics, |this| intravisit::walk_impl_item(this, impl_item)) @@ -811,18 +804,13 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { /// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the /// ordering is not important there. 
fn visit_early_late(&mut self, - fn_id: ast::NodeId, parent_id: Option, decl: &'tcx hir::FnDecl, generics: &'tcx hir::Generics, walk: F) where F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>), { - let fn_def_id = self.hir_map.local_def_id(fn_id); - insert_late_bound_lifetimes(self.map, - fn_def_id, - decl, - generics); + insert_late_bound_lifetimes(self.map, decl, generics); // Find the start of nested early scopes, e.g. in methods. let mut index = 0; @@ -1549,7 +1537,6 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { /// not amongst the inputs to a projection. In other words, `<&'a /// T as Trait<''b>>::Foo` does not constrain `'a` or `'b`. fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, - fn_def_id: DefId, decl: &hir::FnDecl, generics: &hir::Generics) { debug!("insert_late_bound_lifetimes(decl={:?}, generics={:?})", decl, generics); @@ -1607,22 +1594,9 @@ fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, // any `impl Trait` in the return type? early-bound. if appears_in_output.impl_trait { continue; } - // does not appear in the inputs, but appears in the return - // type? eventually this will be early-bound, but for now we - // just mark it so we can issue warnings. - let constrained_by_input = constrained_by_input.regions.contains(&name); - let appears_in_output = appears_in_output.regions.contains(&name); - if !constrained_by_input && appears_in_output { - debug!("inserting issue_32330 entry for {:?}, {:?} on {:?}", - lifetime.lifetime.id, - name, - fn_def_id); - map.issue_32330.insert( - lifetime.lifetime.id, - ty::Issue32330 { - fn_def_id, - region_name: name, - }); + // does not appear in the inputs, but appears in the return type? early-bound. 
+ if !constrained_by_input.regions.contains(&name) && + appears_in_output.regions.contains(&name) { continue; } diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs index b5284852747f..cae1eba5797c 100644 --- a/src/librustc/traits/project.rs +++ b/src/librustc/traits/project.rs @@ -479,9 +479,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( let obligation = Obligation::with_depth(cause.clone(), depth, param_env, projection_ty); match project_type(selcx, &obligation) { - Ok(ProjectedTy::Progress(Progress { ty: projected_ty, - mut obligations, - cacheable })) => { + Ok(ProjectedTy::Progress(Progress { ty: projected_ty, mut obligations })) => { // if projection succeeded, then what we get out of this // is also non-normalized (consider: it was derived from // an impl, where-clause etc) and hence we must @@ -490,12 +488,10 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( debug!("opt_normalize_projection_type: \ projected_ty={:?} \ depth={} \ - obligations={:?} \ - cacheable={:?}", + obligations={:?}", projected_ty, depth, - obligations, - cacheable); + obligations); let result = if projected_ty.has_projection_types() { let mut normalizer = AssociatedTypeNormalizer::new(selcx, @@ -520,8 +516,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( obligations, } }; - infcx.projection_cache.borrow_mut() - .complete(projection_ty, &result, cacheable); + infcx.projection_cache.borrow_mut().complete(projection_ty, &result); Some(result) } Ok(ProjectedTy::NoProgress(projected_ty)) => { @@ -532,8 +527,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( value: projected_ty, obligations: vec![] }; - infcx.projection_cache.borrow_mut() - .complete(projection_ty, &result, true); + infcx.projection_cache.borrow_mut().complete(projection_ty, &result); Some(result) } Err(ProjectionTyError::TooManyCandidates) => { @@ -606,7 +600,6 @@ enum ProjectedTy<'tcx> { struct Progress<'tcx> { ty: Ty<'tcx>, obligations: Vec>, - cacheable: bool, 
} impl<'tcx> Progress<'tcx> { @@ -614,7 +607,6 @@ impl<'tcx> Progress<'tcx> { Progress { ty: tcx.types.err, obligations: vec![], - cacheable: true } } @@ -1228,7 +1220,6 @@ fn confirm_param_env_candidate<'cx, 'gcx, 'tcx>( Progress { ty: ty_match.value, obligations, - cacheable: ty_match.unconstrained_regions.is_empty(), } } Err(e) => { @@ -1272,7 +1263,6 @@ fn confirm_impl_candidate<'cx, 'gcx, 'tcx>( Progress { ty: ty.subst(tcx, substs), obligations: nested, - cacheable: true } } @@ -1380,22 +1370,11 @@ impl<'tcx> ProjectionCache<'tcx> { Ok(()) } - /// Indicates that `key` was normalized to `value`. If `cacheable` is false, - /// then this result is sadly not cacheable. - fn complete(&mut self, - key: ty::ProjectionTy<'tcx>, - value: &NormalizedTy<'tcx>, - cacheable: bool) { - let fresh_key = if cacheable { - debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}", - key, value); - self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.value)) - } else { - debug!("ProjectionCacheEntry::complete: cannot cache: key={:?}, value={:?}", - key, value); - !self.map.remove(key) - }; - + /// Indicates that `key` was normalized to `value`. 
+ fn complete(&mut self, key: ty::ProjectionTy<'tcx>, value: &NormalizedTy<'tcx>) { + debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}", + key, value); + let fresh_key = self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.value)); assert!(!fresh_key, "never started projecting `{:?}`", key); } diff --git a/src/librustc/ty/error.rs b/src/librustc/ty/error.rs index 3442cf0ef698..86a4f6691896 100644 --- a/src/librustc/ty/error.rs +++ b/src/librustc/ty/error.rs @@ -39,8 +39,8 @@ pub enum TypeError<'tcx> { RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>), RegionsNotSame(Region<'tcx>, Region<'tcx>), RegionsNoOverlap(Region<'tcx>, Region<'tcx>), - RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>, Option>), - RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>, Option>), + RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>), + RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>), Sorts(ExpectedFound>), IntMismatch(ExpectedFound), FloatMismatch(ExpectedFound), @@ -116,13 +116,13 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { RegionsNoOverlap(..) 
=> { write!(f, "lifetimes do not intersect") } - RegionsInsufficientlyPolymorphic(br, _, _) => { + RegionsInsufficientlyPolymorphic(br, _) => { write!(f, "expected bound lifetime parameter{}{}, found concrete lifetime", if br.is_named() { " " } else { "" }, br) } - RegionsOverlyPolymorphic(br, _, _) => { + RegionsOverlyPolymorphic(br, _) => { write!(f, "expected concrete lifetime, found bound lifetime parameter{}{}", if br.is_named() { " " } else { "" }, @@ -257,15 +257,15 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.note_and_explain_region(db, "...does not overlap ", region2, ""); } - RegionsInsufficientlyPolymorphic(_, conc_region, _) => { + RegionsInsufficientlyPolymorphic(_, conc_region) => { self.note_and_explain_region(db, "concrete lifetime that was found is ", conc_region, ""); } - RegionsOverlyPolymorphic(_, &ty::ReVar(_), _) => { + RegionsOverlyPolymorphic(_, &ty::ReVar(_)) => { // don't bother to print out the message below for // inference variables, it's not very illuminating. 
} - RegionsOverlyPolymorphic(_, conc_region, _) => { + RegionsOverlyPolymorphic(_, conc_region) => { self.note_and_explain_region(db, "expected concrete lifetime is ", conc_region, ""); } diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 804f47b5283f..2ee7149fc131 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -67,7 +67,6 @@ pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef}; pub use self::sty::{ExistentialProjection, PolyExistentialProjection}; pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region}; pub use self::sty::RegionKind; -pub use self::sty::Issue32330; pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid}; pub use self::sty::BoundRegion::*; pub use self::sty::InferTy::*; @@ -676,7 +675,6 @@ pub struct RegionParameterDef { pub name: Name, pub def_id: DefId, pub index: u32, - pub issue_32330: Option, /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute /// on generic parameter `'a`, asserts data of lifetime `'a` diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index f261a56cdccd..48ace804995a 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -346,13 +346,11 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { RegionsNoOverlap(a, b) => { return tcx.lift(&(a, b)).map(|(a, b)| RegionsNoOverlap(a, b)) } - RegionsInsufficientlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b, c)) + RegionsInsufficientlyPolymorphic(a, b) => { + return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b)) } - RegionsOverlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b, c)) + RegionsOverlyPolymorphic(a, b) => { + return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b)) } IntMismatch(x) => IntMismatch(x), FloatMismatch(x) => FloatMismatch(x), @@ 
-1004,13 +1002,11 @@ impl<'tcx> TypeFoldable<'tcx> for ty::error::TypeError<'tcx> { RegionsNoOverlap(a, b) => { RegionsNoOverlap(a.fold_with(folder), b.fold_with(folder)) }, - RegionsInsufficientlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - RegionsInsufficientlyPolymorphic(a, b.fold_with(folder), c) + RegionsInsufficientlyPolymorphic(a, b) => { + RegionsInsufficientlyPolymorphic(a, b.fold_with(folder)) }, - RegionsOverlyPolymorphic(a, b, ref c) => { - let c = c.clone(); - RegionsOverlyPolymorphic(a, b.fold_with(folder), c) + RegionsOverlyPolymorphic(a, b) => { + RegionsOverlyPolymorphic(a, b.fold_with(folder)) }, IntMismatch(x) => IntMismatch(x), FloatMismatch(x) => FloatMismatch(x), @@ -1036,8 +1032,8 @@ impl<'tcx> TypeFoldable<'tcx> for ty::error::TypeError<'tcx> { RegionsNoOverlap(a, b) => { a.visit_with(visitor) || b.visit_with(visitor) }, - RegionsInsufficientlyPolymorphic(_, b, _) | - RegionsOverlyPolymorphic(_, b, _) => { + RegionsInsufficientlyPolymorphic(_, b) | + RegionsOverlyPolymorphic(_, b) => { b.visit_with(visitor) }, Sorts(x) => x.visit_with(visitor), diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 5f89714b33fd..b42180b288bf 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -77,20 +77,6 @@ impl BoundRegion { } } -/// When a region changed from late-bound to early-bound when #32330 -/// was fixed, its `RegionParameterDef` will have one of these -/// structures that we can use to give nicer errors. -#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, - RustcEncodable, RustcDecodable)] -pub struct Issue32330 { - /// fn where is region declared - pub fn_def_id: DefId, - - /// name of region; duplicates the info in BrNamed but convenient - /// to have it here, and this code is only temporary - pub region_name: ast::Name, -} - /// NB: If you change this, you'll probably want to change the corresponding /// AST structure in libsyntax/ast.rs as well. 
#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] diff --git a/src/librustc_typeck/astconv.rs b/src/librustc_typeck/astconv.rs index bb6e478738aa..1ec850ad7f34 100644 --- a/src/librustc_typeck/astconv.rs +++ b/src/librustc_typeck/astconv.rs @@ -1110,46 +1110,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { } hir::TyBareFn(ref bf) => { require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span); - let bare_fn_ty = self.ty_of_fn(bf.unsafety, bf.abi, &bf.decl); - - // Find any late-bound regions declared in return type that do - // not appear in the arguments. These are not wellformed. - // - // Example: - // - // for<'a> fn() -> &'a str <-- 'a is bad - // for<'a> fn(&'a String) -> &'a str <-- 'a is ok - // - // Note that we do this check **here** and not in - // `ty_of_bare_fn` because the latter is also used to make - // the types for fn items, and we do not want to issue a - // warning then. (Once we fix #32330, the regions we are - // checking for here would be considered early bound - // anyway.) 
- let inputs = bare_fn_ty.inputs(); - let late_bound_in_args = tcx.collect_constrained_late_bound_regions( - &inputs.map_bound(|i| i.to_owned())); - let output = bare_fn_ty.output(); - let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output); - for br in late_bound_in_ret.difference(&late_bound_in_args) { - let br_name = match *br { - ty::BrNamed(_, name) => name, - _ => { - span_bug!( - bf.decl.output.span(), - "anonymous bound region {:?} in return but not args", - br); - } - }; - struct_span_err!(tcx.sess, - ast_ty.span, - E0581, - "return type references lifetime `{}`, \ - which does not appear in the fn input types", - br_name) - .emit(); - } - tcx.mk_fn_ptr(bare_fn_ty) + tcx.mk_fn_ptr(self.ty_of_fn(bf.unsafety, bf.abi, &bf.decl)) } hir::TyTraitObject(ref bounds, ref lifetime) => { self.conv_object_ty_poly_trait_ref(ast_ty.span, bounds, lifetime) @@ -1269,23 +1230,56 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { -> ty::PolyFnSig<'tcx> { debug!("ty_of_fn"); + let tcx = self.tcx(); let input_tys: Vec = decl.inputs.iter().map(|a| self.ty_of_arg(a, None)).collect(); let output_ty = match decl.output { hir::Return(ref output) => self.ast_ty_to_ty(output), - hir::DefaultReturn(..) => self.tcx().mk_nil(), + hir::DefaultReturn(..) => tcx.mk_nil(), }; debug!("ty_of_fn: output_ty={:?}", output_ty); - ty::Binder(self.tcx().mk_fn_sig( + let bare_fn_ty = ty::Binder(tcx.mk_fn_sig( input_tys.into_iter(), output_ty, decl.variadic, unsafety, abi - )) + )); + + // Find any late-bound regions declared in return type that do + // not appear in the arguments. These are not wellformed. 
+ // + // Example: + // for<'a> fn() -> &'a str <-- 'a is bad + // for<'a> fn(&'a String) -> &'a str <-- 'a is ok + let inputs = bare_fn_ty.inputs(); + let late_bound_in_args = tcx.collect_constrained_late_bound_regions( + &inputs.map_bound(|i| i.to_owned())); + let output = bare_fn_ty.output(); + let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output); + for br in late_bound_in_ret.difference(&late_bound_in_args) { + let br_name = match *br { + ty::BrNamed(_, name) => name, + _ => { + span_bug!( + decl.output.span(), + "anonymous bound region {:?} in return but not args", + br); + } + }; + struct_span_err!(tcx.sess, + decl.output.span(), + E0581, + "return type references lifetime `{}`, \ + which does not appear in the fn input types", + br_name) + .emit(); + } + + bare_fn_ty } pub fn ty_of_closure(&self, diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 7f69885047b9..cb22dcc21de2 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -1607,7 +1607,7 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { fn re_infer(&self, span: Span, def: Option<&ty::RegionParameterDef>) -> Option> { let v = match def { - Some(def) => infer::EarlyBoundRegion(span, def.name, def.issue_32330), + Some(def) => infer::EarlyBoundRegion(span, def.name), None => infer::MiscVariable(span) }; Some(self.next_region_var(v)) diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index 8780131bbcc2..f25a6cf58a79 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -979,13 +979,11 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let early_lifetimes = early_bound_lifetimes_from_generics(tcx, ast_generics); let regions = early_lifetimes.enumerate().map(|(i, l)| { - let issue_32330 = tcx.named_region_map.issue_32330.get(&l.lifetime.id).cloned(); ty::RegionParameterDef { name: l.lifetime.name, index: own_start + i as u32, def_id: 
tcx.hir.local_def_id(l.lifetime.id), pure_wrt_drop: l.pure_wrt_drop, - issue_32330: issue_32330, } }).collect::>(); diff --git a/src/test/compile-fail/associated-types/cache/project-fn-ret-contravariant.rs b/src/test/compile-fail/associated-types/cache/project-fn-ret-contravariant.rs index c5557cee7cc1..0e822aff01e8 100644 --- a/src/test/compile-fail/associated-types/cache/project-fn-ret-contravariant.rs +++ b/src/test/compile-fail/associated-types/cache/project-fn-ret-contravariant.rs @@ -43,23 +43,19 @@ fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) { (a, b) } -// FIXME(#32330) -//#[cfg(transmute)] // one instantiations: BAD -//fn baz<'a,'b>(x: &'a u32) -> &'static u32 { -// bar(foo, x) //[transmute] ERROR E0495 -//} +#[cfg(transmute)] // one instantiations: BAD +fn baz<'a,'b>(x: &'a u32) -> &'static u32 { + bar(foo, x) //[transmute]~ ERROR E0495 +} -// FIXME(#32330) -//#[cfg(krisskross)] // two instantiations, mixing and matching: BAD -//fn transmute<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) { -// let a = bar(foo, y); //[krisskross] ERROR E0495 -// let b = bar(foo, x); //[krisskross] ERROR E0495 -// (a, b) -//} +#[cfg(krisskross)] // two instantiations, mixing and matching: BAD +fn transmute<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) { + let a = bar(foo, y); //[krisskross]~ ERROR E0495 + let b = bar(foo, x); //[krisskross]~ ERROR E0495 + (a, b) +} #[rustc_error] fn main() { } //[ok]~^ ERROR compilation successful //[oneuse]~^^ ERROR compilation successful -//[transmute]~^^^ ERROR compilation successful -//[krisskross]~^^^^ ERROR compilation successful diff --git a/src/test/compile-fail/associated-types/cache/project-fn-ret-invariant.rs b/src/test/compile-fail/associated-types/cache/project-fn-ret-invariant.rs index a15422e42d94..10fe612980d3 100644 --- a/src/test/compile-fail/associated-types/cache/project-fn-ret-invariant.rs +++ b/src/test/compile-fail/associated-types/cache/project-fn-ret-invariant.rs @@ -42,35 +42,29 @@ fn 
baz<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { (a, b) } -// FIXME(#32330) -//#[cfg(oneuse)] // one instantiation: BAD -//fn baz<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { -// let f = foo; // <-- No consistent type can be inferred for `f` here. -// let a = bar(f, x); //[oneuse] ERROR E0495 -// let b = bar(f, y); -// (a, b) -//} +#[cfg(oneuse)] // one instantiation: BAD +fn baz<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { + let f = foo; // <-- No consistent type can be inferred for `f` here. + let a = bar(f, x); //[oneuse]~^ ERROR E0495 + let b = bar(f, y); + (a, b) +} -// FIXME(#32330) -//#[cfg(transmute)] // one instantiations: BAD -//fn baz<'a,'b>(x: Type<'a>) -> Type<'static> { -// // Cannot instantiate `foo` with any lifetime other than `'a`, -// // since it is provided as input. -// -// bar(foo, x) //[transmute] ERROR E0495 -//} +#[cfg(transmute)] // one instantiations: BAD +fn baz<'a,'b>(x: Type<'a>) -> Type<'static> { + // Cannot instantiate `foo` with any lifetime other than `'a`, + // since it is provided as input. 
-// FIXME(#32330) -//#[cfg(krisskross)] // two instantiations, mixing and matching: BAD -//fn transmute<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { -// let a = bar(foo, y); //[krisskross] ERROR E0495 -// let b = bar(foo, x); //[krisskross] ERROR E0495 -// (a, b) -//} + bar(foo, x) //[transmute]~ ERROR E0495 +} + +#[cfg(krisskross)] // two instantiations, mixing and matching: BAD +fn transmute<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) { + let a = bar(foo, y); //[krisskross]~ ERROR E0495 + let b = bar(foo, x); //[krisskross]~ ERROR E0495 + (a, b) +} #[rustc_error] fn main() { } //[ok]~^ ERROR compilation successful -//[oneuse]~^^ ERROR compilation successful -//[transmute]~^^^ ERROR compilation successful -//[krisskross]~^^^^ ERROR compilation successful diff --git a/src/test/compile-fail/hr-subtype.rs b/src/test/compile-fail/hr-subtype.rs index 95e469ebcfd7..c88d74d53ce9 100644 --- a/src/test/compile-fail/hr-subtype.rs +++ b/src/test/compile-fail/hr-subtype.rs @@ -91,9 +91,6 @@ check! { free_inv_x_vs_free_inv_y: (fn(Inv<'x>), // - if we are covariant, then 'a and 'b can be set to the call-site // intersection; // - if we are contravariant, then 'a can be inferred to 'static. -// -// FIXME(#32330) this is true, but we are not currently impl'ing this -// full semantics check! { bound_a_b_vs_bound_a: (for<'a,'b> fn(&'a u32, &'b u32), for<'a> fn(&'a u32, &'a u32)) } check! 
{ bound_co_a_b_vs_bound_co_a: (for<'a,'b> fn(Co<'a>, Co<'b>), diff --git a/src/test/ui/regions-fn-subtyping-return-static.stderr b/src/test/ui/regions-fn-subtyping-return-static.stderr index 0c7b44af949b..1598a8a40d2f 100644 --- a/src/test/ui/regions-fn-subtyping-return-static.stderr +++ b/src/test/ui/regions-fn-subtyping-return-static.stderr @@ -6,8 +6,6 @@ error[E0308]: mismatched types | = note: expected type `fn(&'cx S) -> &'cx S` found type `fn(&'a S) -> &S {bar::<'_>}` - = note: lifetime parameter `'b` declared on fn `bar` appears only in the return type, but here is required to be higher-ranked, which means that `'b` must appear in both argument and return types - = note: this error is the result of a recent bug fix; for more information, see issue #33685 error: aborting due to previous error From c83f97533a29f3a8691101ce637d5fa322843d8c Mon Sep 17 00:00:00 2001 From: Ian Douglas Scott Date: Mon, 10 Jul 2017 20:44:14 -0700 Subject: [PATCH 044/213] Redox: Add JoinHandleExt (matching Unix version) --- src/libstd/sys/redox/ext/mod.rs | 3 ++ src/libstd/sys/redox/ext/thread.rs | 47 ++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 src/libstd/sys/redox/ext/thread.rs diff --git a/src/libstd/sys/redox/ext/mod.rs b/src/libstd/sys/redox/ext/mod.rs index 513ef272e979..0c1bf9e95576 100644 --- a/src/libstd/sys/redox/ext/mod.rs +++ b/src/libstd/sys/redox/ext/mod.rs @@ -33,6 +33,7 @@ pub mod ffi; pub mod fs; pub mod io; pub mod process; +pub mod thread; /// A prelude for conveniently writing platform-specific code. 
/// @@ -46,5 +47,7 @@ pub mod prelude { #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")] pub use super::fs::{FileTypeExt, PermissionsExt, OpenOptionsExt, MetadataExt}; #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")] + pub use super::thread::JoinHandleExt; + #[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")] pub use super::process::{CommandExt, ExitStatusExt}; } diff --git a/src/libstd/sys/redox/ext/thread.rs b/src/libstd/sys/redox/ext/thread.rs new file mode 100644 index 000000000000..52be2ccd9f96 --- /dev/null +++ b/src/libstd/sys/redox/ext/thread.rs @@ -0,0 +1,47 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Unix-specific extensions to primitives in the `std::thread` module. + +#![stable(feature = "thread_extensions", since = "1.9.0")] + +use sys_common::{AsInner, IntoInner}; +use thread::JoinHandle; + +#[stable(feature = "thread_extensions", since = "1.9.0")] +#[allow(deprecated)] +pub type RawPthread = usize; + +/// Unix-specific extensions to `std::thread::JoinHandle` +#[stable(feature = "thread_extensions", since = "1.9.0")] +pub trait JoinHandleExt { + /// Extracts the raw pthread_t without taking ownership + #[stable(feature = "thread_extensions", since = "1.9.0")] + fn as_pthread_t(&self) -> RawPthread; + + /// Consumes the thread, returning the raw pthread_t + /// + /// This function **transfers ownership** of the underlying pthread_t to + /// the caller. Callers are then the unique owners of the pthread_t and + /// must either detach or join the pthread_t once it's no longer needed. 
+ #[stable(feature = "thread_extensions", since = "1.9.0")] + fn into_pthread_t(self) -> RawPthread; +} + +#[stable(feature = "thread_extensions", since = "1.9.0")] +impl JoinHandleExt for JoinHandle { + fn as_pthread_t(&self) -> RawPthread { + self.as_inner().id() as RawPthread + } + + fn into_pthread_t(self) -> RawPthread { + self.into_inner().into_id() as RawPthread + } +} From a30092fbf6ad73bdf11fb6eddba4e5bd66d40601 Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Sat, 29 Jul 2017 08:15:37 -0600 Subject: [PATCH 045/213] Split FL and FD fcntls --- src/libstd/sys/redox/fd.rs | 4 ++-- src/libstd/sys/redox/process.rs | 12 ++++++------ src/libstd/sys/redox/syscall/flag.rs | 6 ++++-- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/src/libstd/sys/redox/fd.rs b/src/libstd/sys/redox/fd.rs index 1b37aafef560..ba7bbdc657fc 100644 --- a/src/libstd/sys/redox/fd.rs +++ b/src/libstd/sys/redox/fd.rs @@ -57,9 +57,9 @@ impl FileDesc { } pub fn set_cloexec(&self) -> io::Result<()> { - let mut flags = cvt(syscall::fcntl(self.fd, syscall::F_GETFL, 0))?; + let mut flags = cvt(syscall::fcntl(self.fd, syscall::F_GETFD, 0))?; flags |= syscall::O_CLOEXEC; - cvt(syscall::fcntl(self.fd, syscall::F_SETFL, flags)).and(Ok(())) + cvt(syscall::fcntl(self.fd, syscall::F_SETFD, flags)).and(Ok(())) } pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { diff --git a/src/libstd/sys/redox/process.rs b/src/libstd/sys/redox/process.rs index ff1626d9b31c..17fa07b99ae3 100644 --- a/src/libstd/sys/redox/process.rs +++ b/src/libstd/sys/redox/process.rs @@ -272,21 +272,21 @@ impl Command { if let Some(fd) = stdio.stderr.fd() { t!(cvt(syscall::dup2(fd, 2, &[]))); - let mut flags = t!(cvt(syscall::fcntl(2, syscall::F_GETFL, 0))); + let mut flags = t!(cvt(syscall::fcntl(2, syscall::F_GETFD, 0))); flags &= ! 
syscall::O_CLOEXEC; - t!(cvt(syscall::fcntl(2, syscall::F_SETFL, flags))); + t!(cvt(syscall::fcntl(2, syscall::F_SETFD, flags))); } if let Some(fd) = stdio.stdout.fd() { t!(cvt(syscall::dup2(fd, 1, &[]))); - let mut flags = t!(cvt(syscall::fcntl(1, syscall::F_GETFL, 0))); + let mut flags = t!(cvt(syscall::fcntl(1, syscall::F_GETFD, 0))); flags &= ! syscall::O_CLOEXEC; - t!(cvt(syscall::fcntl(1, syscall::F_SETFL, flags))); + t!(cvt(syscall::fcntl(1, syscall::F_SETFD, flags))); } if let Some(fd) = stdio.stdin.fd() { t!(cvt(syscall::dup2(fd, 0, &[]))); - let mut flags = t!(cvt(syscall::fcntl(0, syscall::F_GETFL, 0))); + let mut flags = t!(cvt(syscall::fcntl(0, syscall::F_GETFD, 0))); flags &= ! syscall::O_CLOEXEC; - t!(cvt(syscall::fcntl(0, syscall::F_SETFL, flags))); + t!(cvt(syscall::fcntl(0, syscall::F_SETFD, flags))); } if let Some(g) = self.gid { diff --git a/src/libstd/sys/redox/syscall/flag.rs b/src/libstd/sys/redox/syscall/flag.rs index 65ad9842d699..892007df2b7c 100644 --- a/src/libstd/sys/redox/syscall/flag.rs +++ b/src/libstd/sys/redox/syscall/flag.rs @@ -20,8 +20,10 @@ pub const EVENT_NONE: usize = 0; pub const EVENT_READ: usize = 1; pub const EVENT_WRITE: usize = 2; -pub const F_GETFL: usize = 1; -pub const F_SETFL: usize = 2; +pub const F_GETFD: usize = 1; +pub const F_SETFD: usize = 2; +pub const F_GETFL: usize = 3; +pub const F_SETFL: usize = 4; pub const FUTEX_WAIT: usize = 0; pub const FUTEX_WAKE: usize = 1; From 54b6b23fc0e464110d8f4c6a6ab12aaeb7e2a198 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 27 Jul 2017 14:53:44 -0700 Subject: [PATCH 046/213] std: Mark `Layout::repeat` as `#[inline]` This fixes an optimization regression by allowing LLVM to see through more functions. 
Closes #43272 --- src/liballoc/allocator.rs | 1 + src/test/codegen/vec-optimizes-away.rs | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 src/test/codegen/vec-optimizes-away.rs diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs index efc59d2cbc86..2a1a8e73e9e4 100644 --- a/src/liballoc/allocator.rs +++ b/src/liballoc/allocator.rs @@ -207,6 +207,7 @@ impl Layout { /// of each element in the array. /// /// On arithmetic overflow, returns `None`. + #[inline] pub fn repeat(&self, n: usize) -> Option<(Self, usize)> { let padded_size = match self.size.checked_add(self.padding_needed_for(self.align)) { None => return None, diff --git a/src/test/codegen/vec-optimizes-away.rs b/src/test/codegen/vec-optimizes-away.rs new file mode 100644 index 000000000000..261564ed51ae --- /dev/null +++ b/src/test/codegen/vec-optimizes-away.rs @@ -0,0 +1,21 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +// +// no-system-llvm +// compile-flags: -O +#![crate_type="lib"] + +#[no_mangle] +pub fn sum_me() -> i32 { + // CHECK-LABEL: @sum_me + // CHECK-NEXT: {{^.*:$}} + // CHECK-NEXT: ret i32 6 + vec![1, 2, 3].iter().sum::() +} From ad1f19479c05ed4eeaaba8f207a61e3d48b0a0b7 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Sat, 29 Jul 2017 13:39:43 -0700 Subject: [PATCH 047/213] rustbuild: Enable building LLVM I use this from time to time debugging LLVM builds, useful to have! 
--- src/bootstrap/builder.rs | 4 +++- src/bootstrap/native.rs | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index 2f6e3ca9253f..811c7df5d997 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -28,6 +28,7 @@ use check; use flags::Subcommand; use doc; use tool; +use native; pub use Compiler; @@ -256,7 +257,8 @@ impl<'a> Builder<'a> { compile::StartupObjects, tool::BuildManifest, tool::Rustbook, tool::ErrorIndex, tool::UnstableBookGen, tool::Tidy, tool::Linkchecker, tool::CargoTest, tool::Compiletest, tool::RemoteTestServer, tool::RemoteTestClient, - tool::RustInstaller, tool::Cargo, tool::Rls, tool::Rustdoc), + tool::RustInstaller, tool::Cargo, tool::Rls, tool::Rustdoc, + native::Llvm), Kind::Test => describe!(check::Tidy, check::Bootstrap, check::DefaultCompiletest, check::HostCompiletest, check::Crate, check::CrateLibrustc, check::Linkcheck, check::Cargotest, check::Cargo, check::Rls, check::Docs, check::ErrorIndex, diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index bb80674c8877..1da277cf1812 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -48,6 +48,10 @@ impl Step for Llvm { run.path("src/llvm") } + fn make_run(run: RunConfig) { + run.builder.ensure(Llvm { target: run.target }) + } + /// Compile LLVM for `target`. fn run(self, builder: &Builder) { let build = builder.build; From b121689c6a0e01cbf95d84334464140a9bef4bba Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 28 Jul 2017 20:30:37 -0700 Subject: [PATCH 048/213] Flag docker invocations as --privileged on CI When upgrading to LLVM 5.0 it was found that the leak sanitizer tests were failing with fatal errors, but they were passing locally when run. Turns out it looks like they may be using new ptrace-like syscalls so the docker container now needs `--privileged` when executing to complete the test. 
--- src/ci/docker/run.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/ci/docker/run.sh b/src/ci/docker/run.sh index da74ffb41ffc..d3f339bc15f7 100755 --- a/src/ci/docker/run.sh +++ b/src/ci/docker/run.sh @@ -67,6 +67,13 @@ else args="$args --env SCCACHE_DIR=/sccache --volume $HOME/.cache/sccache:/sccache" fi +# Run containers as privileged as it should give them access to some more +# syscalls such as ptrace and whatnot. In the upgrade to LLVM 5.0 it was +# discovered that the leak sanitizer apparently needs these syscalls nowadays so +# we'll need `--privileged` for at least the `x86_64-gnu` builder, so this just +# goes ahead and sets it for all builders. +args="$args --privileged" + exec docker \ run \ --volume "$root_dir:/checkout:ro" \ From 3142ca0a6562ecf4653e2f1b1da19de6270322ec Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sat, 29 Jul 2017 20:29:50 +0200 Subject: [PATCH 049/213] Update rls submodule --- src/tools/rls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tools/rls b/src/tools/rls index 79d659e5699f..06b48d1c97dd 160000 --- a/src/tools/rls +++ b/src/tools/rls @@ -1 +1 @@ -Subproject commit 79d659e5699fbf7db5b4819e9a442fb3f550472a +Subproject commit 06b48d1c97dd69968a24b4f506e85e3a3efb7dea From 5264103de4085d61a9e47c97de3a31b1f36b2dd3 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 11 Jul 2017 14:01:07 -0700 Subject: [PATCH 050/213] add new instructions for asserting when values are valid, and to describe when we can rely on them being locked in memory --- src/librustc/ich/impls_mir.rs | 10 ++++++++-- src/librustc/mir/mod.rs | 18 ++++++++++++++++++ src/librustc/mir/visit.rs | 15 +++++++++++++-- src/librustc_mir/dataflow/drop_flag_effects.rs | 1 + src/librustc_mir/dataflow/impls/mod.rs | 1 + src/librustc_mir/dataflow/move_paths/mod.rs | 1 + src/librustc_mir/transform/qualify_consts.rs | 1 + src/librustc_mir/transform/rustc_peek.rs | 1 + src/librustc_mir/transform/type_check.rs | 1 + 
src/librustc_passes/mir_stats.rs | 1 + src/librustc_trans/mir/analyze.rs | 1 + src/librustc_trans/mir/constant.rs | 1 + src/librustc_trans/mir/statement.rs | 1 + 13 files changed, 49 insertions(+), 4 deletions(-) diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index 6dadb702b9f2..eb0c62a11618 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -226,8 +226,12 @@ for mir::StatementKind<'tcx> { mir::StatementKind::StorageDead(ref lvalue) => { lvalue.hash_stable(hcx, hasher); } - mir::StatementKind::EndRegion(ref extents) => { - extents.hash_stable(hcx, hasher); + mir::StatementKind::EndRegion(ref extent) => { + extent.hash_stable(hcx, hasher); + } + mir::StatementKind::Validate(ref op, ref lvalues) => { + op.hash_stable(hcx, hasher); + lvalues.hash_stable(hcx, hasher); } mir::StatementKind::Nop => {} mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { @@ -239,6 +243,8 @@ for mir::StatementKind<'tcx> { } } +impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(extent) }); + impl<'a, 'gcx, 'tcx> HashStable> for mir::Lvalue<'tcx> { fn hash_stable(&self, hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 3dcd64af2ede..c7be58c13f86 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -818,12 +818,16 @@ pub enum StatementKind<'tcx> { /// End the current live range for the storage of the local. StorageDead(Lvalue<'tcx>), + /// Execute a piece of inline Assembly. InlineAsm { asm: Box, outputs: Vec>, inputs: Vec> }, + /// Assert the given lvalues to be valid inhabitants of their type. + Validate(ValidationOp, Vec<(Ty<'tcx>, Lvalue<'tcx>)>), + /// Mark one terminating point of an extent (i.e. static region). /// (The starting point(s) arise implicitly from borrows.) 
EndRegion(CodeExtent), @@ -832,6 +836,13 @@ pub enum StatementKind<'tcx> { Nop, } +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, PartialEq, Eq)] +pub enum ValidationOp { + Acquire, + Release, + Suspend(CodeExtent), +} + impl<'tcx> Debug for Statement<'tcx> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { use self::StatementKind::*; @@ -839,6 +850,7 @@ impl<'tcx> Debug for Statement<'tcx> { Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv), // (reuse lifetime rendering policy from ppaux.) EndRegion(ref ce) => write!(fmt, "EndRegion({})", ty::ReScope(*ce)), + Validate(ref op, ref lvalues) => write!(fmt, "Validate({:?}, {:?})", op, lvalues), StorageLive(ref lv) => write!(fmt, "StorageLive({:?})", lv), StorageDead(ref lv) => write!(fmt, "StorageDead({:?})", lv), SetDiscriminant{lvalue: ref lv, variant_index: index} => { @@ -1505,6 +1517,10 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { // trait with a `fn fold_extent`. EndRegion(ref extent) => EndRegion(extent.clone()), + Validate(ref op, ref lvals) => + Validate(op.clone(), + lvals.iter().map(|ty_and_lval| ty_and_lval.fold_with(folder)).collect()), + Nop => Nop, }; Statement { @@ -1530,6 +1546,8 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { // trait with a `fn visit_extent`. EndRegion(ref _extent) => false, + Validate(ref _op, ref lvalues) => lvalues.iter().any(|ty_and_lvalue| ty_and_lvalue.visit_with(visitor)), + Nop => false, } } diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index fd3a9f8cd2d9..5284a6132396 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -333,6 +333,12 @@ macro_rules! 
make_mir_visitor { self.visit_assign(block, lvalue, rvalue, location); } StatementKind::EndRegion(_) => {} + StatementKind::Validate(_, ref $($mutability)* lvalues) => { + for & $($mutability)* (ref $($mutability)* ty, ref $($mutability)* lvalue) in lvalues { + self.visit_ty(ty, Lookup::Loc(location)); + self.visit_lvalue(lvalue, LvalueContext::Validate, location); + } + } StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => { self.visit_lvalue(lvalue, LvalueContext::Store, location); } @@ -784,6 +790,9 @@ pub enum LvalueContext<'tcx> { // Starting and ending a storage live range StorageLive, StorageDead, + + // Validation command + Validate, } impl<'tcx> LvalueContext<'tcx> { @@ -830,7 +839,8 @@ impl<'tcx> LvalueContext<'tcx> { LvalueContext::Borrow { kind: BorrowKind::Shared, .. } | LvalueContext::Borrow { kind: BorrowKind::Unique, .. } | LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume | - LvalueContext::StorageLive | LvalueContext::StorageDead => false, + LvalueContext::StorageLive | LvalueContext::StorageDead | + LvalueContext::Validate => false, } } @@ -842,7 +852,8 @@ impl<'tcx> LvalueContext<'tcx> { LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume => true, LvalueContext::Borrow { kind: BorrowKind::Mut, .. } | LvalueContext::Store | LvalueContext::Call | LvalueContext::Projection(Mutability::Mut) | - LvalueContext::Drop | LvalueContext::StorageLive | LvalueContext::StorageDead => false, + LvalueContext::Drop | LvalueContext::StorageLive | LvalueContext::StorageDead | + LvalueContext::Validate => false, } } diff --git a/src/librustc_mir/dataflow/drop_flag_effects.rs b/src/librustc_mir/dataflow/drop_flag_effects.rs index daafbecc5dfa..24d5aa9e46bf 100644 --- a/src/librustc_mir/dataflow/drop_flag_effects.rs +++ b/src/librustc_mir/dataflow/drop_flag_effects.rs @@ -289,6 +289,7 @@ pub(crate) fn drop_flag_effects_for_location<'a, 'tcx, F>( mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. 
} | mir::StatementKind::EndRegion(_) | + mir::StatementKind::Validate(..) | mir::StatementKind::Nop => {} }, None => { diff --git a/src/librustc_mir/dataflow/impls/mod.rs b/src/librustc_mir/dataflow/impls/mod.rs index 97c996dea68f..d5bdc71a705c 100644 --- a/src/librustc_mir/dataflow/impls/mod.rs +++ b/src/librustc_mir/dataflow/impls/mod.rs @@ -486,6 +486,7 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | mir::StatementKind::EndRegion(_) | + mir::StatementKind::Validate(..) | mir::StatementKind::Nop => {} } } diff --git a/src/librustc_mir/dataflow/move_paths/mod.rs b/src/librustc_mir/dataflow/move_paths/mod.rs index fbf977b98f90..c2945d465927 100644 --- a/src/librustc_mir/dataflow/move_paths/mod.rs +++ b/src/librustc_mir/dataflow/move_paths/mod.rs @@ -416,6 +416,7 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { } StatementKind::InlineAsm { .. } | StatementKind::EndRegion(_) | + StatementKind::Validate(..) | StatementKind::Nop => {} } } diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index 9bb0f07aa68a..9d01f8294e4f 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -908,6 +908,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { StatementKind::StorageDead(_) | StatementKind::InlineAsm {..} | StatementKind::EndRegion(_) | + StatementKind::Validate(..) | StatementKind::Nop => {} } }); diff --git a/src/librustc_mir/transform/rustc_peek.rs b/src/librustc_mir/transform/rustc_peek.rs index 5918de0c6881..268e7a4c185b 100644 --- a/src/librustc_mir/transform/rustc_peek.rs +++ b/src/librustc_mir/transform/rustc_peek.rs @@ -161,6 +161,7 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | mir::StatementKind::EndRegion(_) | + mir::StatementKind::Validate(..) 
| mir::StatementKind::Nop => continue, mir::StatementKind::SetDiscriminant{ .. } => span_bug!(stmt.source_info.span, diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs index 7e6fccf30192..1c7899a46d1d 100644 --- a/src/librustc_mir/transform/type_check.rs +++ b/src/librustc_mir/transform/type_check.rs @@ -414,6 +414,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } StatementKind::InlineAsm { .. } | StatementKind::EndRegion(_) | + StatementKind::Validate(..) | StatementKind::Nop => {} } } diff --git a/src/librustc_passes/mir_stats.rs b/src/librustc_passes/mir_stats.rs index 9895802700ef..d5e477ff0c78 100644 --- a/src/librustc_passes/mir_stats.rs +++ b/src/librustc_passes/mir_stats.rs @@ -126,6 +126,7 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { self.record(match statement.kind { StatementKind::Assign(..) => "StatementKind::Assign", StatementKind::EndRegion(..) => "StatementKind::EndRegion", + StatementKind::Validate(..) => "StatementKind::Validate", StatementKind::SetDiscriminant { .. } => "StatementKind::SetDiscriminant", StatementKind::StorageLive(..) => "StatementKind::StorageLive", StatementKind::StorageDead(..) 
=> "StatementKind::StorageDead", diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 45afcf51b520..598af1cda91d 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -158,6 +158,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { LvalueContext::StorageLive | LvalueContext::StorageDead | + LvalueContext::Validate | LvalueContext::Inspect | LvalueContext::Consume => {} diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 98e774a29877..c90382673a4a 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -285,6 +285,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } mir::StatementKind::StorageLive(_) | mir::StatementKind::StorageDead(_) | + mir::StatementKind::Validate(..) | mir::StatementKind::EndRegion(_) | mir::StatementKind::Nop => {} mir::StatementKind::InlineAsm { .. } | diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 170a76a49497..52dfc8dc4de5 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -87,6 +87,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx } mir::StatementKind::EndRegion(_) | + mir::StatementKind::Validate(..) 
| mir::StatementKind::Nop => bcx, } } From 735ace977c75405084cb41b3b0613d14b55c811d Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 11 Jul 2017 14:10:38 -0700 Subject: [PATCH 051/213] add a pass for validation commands; for now just emit the initial AcquireValid --- src/librustc_driver/driver.rs | 4 ++ src/librustc_mir/transform/add_validation.rs | 43 ++++++++++++++++++++ src/librustc_mir/transform/mod.rs | 1 + 3 files changed, 48 insertions(+) create mode 100644 src/librustc_mir/transform/add_validation.rs diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index c592882a1e43..68e6b0f50d1d 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -925,6 +925,10 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, let mut passes = Passes::new(); passes.push_hook(mir::transform::dump_mir::DumpMir); + // Insert AcquireValid and ReleaseValid calls. Conceptually, this + // pass is actually part of MIR building. + passes.push_pass(MIR_CONST, mir::transform::add_validation::AddValidation); + // Remove all `EndRegion` statements that are not involved in borrows. passes.push_pass(MIR_CONST, mir::transform::clean_end_regions::CleanEndRegions); diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs new file mode 100644 index 000000000000..6934ec7a74f2 --- /dev/null +++ b/src/librustc_mir/transform/add_validation.rs @@ -0,0 +1,43 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass adds validation calls (AcquireValid, ReleaseValid) where appropriate. +//! It has to be run really early, before transformations like inlining, because +//! 
introducing these calls *adds* UB -- so, conceptually, this pass is actually part +//! of MIR building, and only after this pass we think of the program has having the +//! normal MIR semantics. + +use rustc::ty::TyCtxt; +use rustc::mir::*; +use rustc::mir::transform::{MirPass, MirSource}; + +pub struct AddValidation; + +impl MirPass for AddValidation { + fn run_pass<'a, 'tcx>(&self, + _tcx: TyCtxt<'a, 'tcx, 'tcx>, + _: MirSource, + mir: &mut Mir<'tcx>) { + // Add an AcquireValid at the beginning of the start block + if mir.arg_count > 0 { + let acquire_stmt = Statement { + source_info: SourceInfo { + scope: ARGUMENT_VISIBILITY_SCOPE, + span: mir.span, + }, + kind: StatementKind::Validate(ValidationOp::Acquire, + // Skip return value, go over all the arguments + mir.local_decls.iter_enumerated().skip(1).take(mir.arg_count) + .map(|(local, local_decl)| (local_decl.ty, Lvalue::Local(local))).collect()) + }; + mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, acquire_stmt); + } + } +} diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs index c9c8ad0e0eb6..a247ce2231e7 100644 --- a/src/librustc_mir/transform/mod.rs +++ b/src/librustc_mir/transform/mod.rs @@ -24,6 +24,7 @@ use syntax::ast; use syntax_pos::{DUMMY_SP, Span}; use transform; +pub mod add_validation; pub mod clean_end_regions; pub mod simplify_branches; pub mod simplify; From 33585f4fe11968ce652815c8a3debfdf97df6baa Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 11 Jul 2017 14:30:30 -0700 Subject: [PATCH 052/213] CleanEndRegions: do not clean regions that occur in types in validation statements --- .../transform/clean_end_regions.rs | 33 +++++++++++++++++-- src/librustc_mir/transform/erase_regions.rs | 20 +++++++---- 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/src/librustc_mir/transform/clean_end_regions.rs b/src/librustc_mir/transform/clean_end_regions.rs index 36125f945436..28311a5e68cc 100644 --- 
a/src/librustc_mir/transform/clean_end_regions.rs +++ b/src/librustc_mir/transform/clean_end_regions.rs @@ -24,13 +24,14 @@ use rustc_data_structures::fx::FxHashSet; use rustc::middle::region::CodeExtent; use rustc::mir::transform::{MirPass, MirSource}; use rustc::mir::{BasicBlock, Location, Mir, Rvalue, Statement, StatementKind}; -use rustc::mir::visit::{MutVisitor, Visitor}; -use rustc::ty::{RegionKind, TyCtxt}; +use rustc::mir::visit::{MutVisitor, Visitor, Lookup}; +use rustc::ty::{Ty, RegionKind, TyCtxt}; pub struct CleanEndRegions; struct GatherBorrowedRegions { seen_regions: FxHashSet, + in_validation_statement: bool, } struct DeleteTrivialEndRegions<'a> { @@ -42,7 +43,7 @@ impl MirPass for CleanEndRegions { _tcx: TyCtxt<'a, 'tcx, 'tcx>, _source: MirSource, mir: &mut Mir<'tcx>) { - let mut gather = GatherBorrowedRegions { seen_regions: FxHashSet() }; + let mut gather = GatherBorrowedRegions { seen_regions: FxHashSet(), in_validation_statement: false }; gather.visit_mir(mir); let mut delete = DeleteTrivialEndRegions { seen_regions: &mut gather.seen_regions }; @@ -54,6 +55,7 @@ impl<'tcx> Visitor<'tcx> for GatherBorrowedRegions { fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { + // Gather regions that are used for borrows if let Rvalue::Ref(r, _, _) = *rvalue { if let RegionKind::ReScope(ce) = *r { self.seen_regions.insert(ce); @@ -61,6 +63,31 @@ impl<'tcx> Visitor<'tcx> for GatherBorrowedRegions { } self.super_rvalue(rvalue, location); } + + fn visit_statement(&mut self, + block: BasicBlock, + statement: &Statement<'tcx>, + location: Location) { + self.in_validation_statement = match statement.kind { + StatementKind::Validate(..) 
=> true, + _ => false, + }; + self.super_statement(block, statement, location); + self.in_validation_statement = false; + } + + fn visit_ty(&mut self, ty: &Ty<'tcx>, _: Lookup) { + // Gather regions that occur in types inside AcquireValid/ReleaseValid statements + if self.in_validation_statement { + for re in ty.walk().flat_map(|t| t.regions()) { + match *re { + RegionKind::ReScope(ce) => { self.seen_regions.insert(ce); } + _ => {}, + } + } + } + self.super_ty(ty); + } } impl<'a, 'tcx> MutVisitor<'tcx> for DeleteTrivialEndRegions<'a> { diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index da9032685e03..12b1c549ffec 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -11,6 +11,8 @@ //! This pass erases all early-bound regions from the types occuring in the MIR. //! We want to do this once just before trans, so trans does not have to take //! care erasing regions all over the place. +//! NOTE: We do NOT erase regions of statements that are relevant for "types-as-contracts"-validation, +//! namely, AcquireValid, ReleaseValid, and EndRegion. 
use rustc::ty::subst::Substs; use rustc::ty::{Ty, TyCtxt, ClosureSubsts}; @@ -20,20 +22,24 @@ use rustc::mir::transform::{MirPass, MirSource}; struct EraseRegionsVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, + in_validation_statement: bool, } impl<'a, 'tcx> EraseRegionsVisitor<'a, 'tcx> { pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self { EraseRegionsVisitor { - tcx: tcx + tcx: tcx, + in_validation_statement: false, } } } impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { fn visit_ty(&mut self, ty: &mut Ty<'tcx>, _: Lookup) { - let old_ty = *ty; - *ty = self.tcx.erase_regions(&old_ty); + if !self.in_validation_statement { + *ty = self.tcx.erase_regions(&{*ty}); + } + self.super_ty(ty); } fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>, _: Location) { @@ -71,10 +77,12 @@ impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { block: BasicBlock, statement: &mut Statement<'tcx>, location: Location) { - if let StatementKind::EndRegion(_) = statement.kind { - statement.kind = StatementKind::Nop; - } + self.in_validation_statement = match statement.kind { + StatementKind::Validate(..) => true, + _ => false, + }; self.super_statement(block, statement, location); + self.in_validation_statement = false; } } From 82786b2fe12fcc0fc9d5b2e9d069460e05310787 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 11 Jul 2017 16:31:30 -0700 Subject: [PATCH 053/213] emit validation for function calls and Ref --- src/librustc_mir/transform/add_validation.rs | 93 ++++++++++++++++++-- 1 file changed, 88 insertions(+), 5 deletions(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 6934ec7a74f2..0c9848de8fca 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -14,7 +14,7 @@ //! of MIR building, and only after this pass we think of the program has having the //! normal MIR semantics. 
-use rustc::ty::TyCtxt; +use rustc::ty::{TyCtxt, RegionKind}; use rustc::mir::*; use rustc::mir::transform::{MirPass, MirSource}; @@ -22,22 +22,105 @@ pub struct AddValidation; impl MirPass for AddValidation { fn run_pass<'a, 'tcx>(&self, - _tcx: TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, _: MirSource, mir: &mut Mir<'tcx>) { - // Add an AcquireValid at the beginning of the start block + // PART 1 + // Add an AcquireValid at the beginning of the start block. if mir.arg_count > 0 { let acquire_stmt = Statement { source_info: SourceInfo { scope: ARGUMENT_VISIBILITY_SCOPE, - span: mir.span, + span: mir.span, // TODO: Consider using just the span covering the function argument declaration }, kind: StatementKind::Validate(ValidationOp::Acquire, // Skip return value, go over all the arguments mir.local_decls.iter_enumerated().skip(1).take(mir.arg_count) - .map(|(local, local_decl)| (local_decl.ty, Lvalue::Local(local))).collect()) + .map(|(local, local_decl)| (local_decl.ty, Lvalue::Local(local))).collect() + ) }; mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, acquire_stmt); } + + // PART 2 + // Add ReleaseValid/AcquireValid around function call terminators. We don't use a visitor because + // we need to access the block that a Call jumps to. + let mut returns : Vec<(SourceInfo, Lvalue<'tcx>, BasicBlock)> = Vec::new(); // Here we collect the destinations. + let local_decls = mir.local_decls.clone(); // TODO: Find a way to get rid of this clone. + for block_data in mir.basic_blocks_mut() { + match block_data.terminator { + Some(Terminator { kind: TerminatorKind::Call { ref args, ref destination, .. }, source_info }) => { + // Before the call: Release all arguments + let release_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Release, + args.iter().filter_map(|op| { + match op { + &Operand::Consume(ref lval) => { + let ty = lval.ty(&local_decls, tcx).to_ty(tcx); + Some((ty, lval.clone())) + }, + &Operand::Constant(..) 
=> { None }, + } + }).collect()) + }; + block_data.statements.push(release_stmt); + // Remember the return destination for later + if let &Some(ref destination) = destination { + returns.push((source_info, destination.0.clone(), destination.1)); + } + } + _ => { + // Not a block ending in a Call -> ignore. + // TODO: Handle drop. + } + } + } + // Now we go over the returns we collected to acquire the return values. + for (source_info, dest_lval, dest_block) in returns { + let ty = dest_lval.ty(&local_decls, tcx).to_ty(tcx); + let acquire_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Acquire, vec![(ty, dest_lval)]) + }; + mir.basic_blocks_mut()[dest_block].statements.insert(0, acquire_stmt); + } + + // PART 3 + // Add ReleaseValid/AcquireValid around Ref. Again an iterator does not seem very suited as + // we need to add new statements before and after each Ref. + for block_data in mir.basic_blocks_mut() { + // We want to insert statements around Ref commands as we iterate. To this end, we iterate backwards + // using indices. + for i in (0..block_data.statements.len()).rev() { + let (dest_lval, re, src_lval) = match block_data.statements[i].kind { + StatementKind::Assign(ref dest_lval, Rvalue::Ref(re, _, ref src_lval)) => { + (dest_lval.clone(), re, src_lval.clone()) + }, + _ => continue, + }; + // So this is a ref, and we got all the data we wanted. + let dest_ty = dest_lval.ty(&local_decls, tcx).to_ty(tcx); + let acquire_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(ValidationOp::Acquire, vec![(dest_ty, dest_lval)]), + }; + block_data.statements.insert(i+1, acquire_stmt); + + // The source is released until the region of the borrow ends. + // FIXME: We have to check whether the source path was writable. 
+ let src_ty = src_lval.ty(&local_decls, tcx).to_ty(tcx); + let op = match re { + &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), + &RegionKind::ReErased => bug!("AddValidation pass must be run before erasing lifetimes"), + _ => ValidationOp::Release, + }; + let release_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(op, vec![(src_ty, src_lval)]), + }; + block_data.statements.insert(i, release_stmt); + } + } } } From 24a2ac9e468a18913e5238475db57f7404a37bc5 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Thu, 13 Jul 2017 22:10:14 -0700 Subject: [PATCH 054/213] add_validation: handle drop --- src/librustc_mir/transform/add_validation.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 0c9848de8fca..dacc267612ff 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -70,9 +70,19 @@ impl MirPass for AddValidation { returns.push((source_info, destination.0.clone(), destination.1)); } } + Some(Terminator { kind: TerminatorKind::Drop { location: ref lval, .. }, source_info }) | + Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref lval, .. }, source_info }) => { + // Before the call: Release all arguments + let ty = lval.ty(&local_decls, tcx).to_ty(tcx); + let release_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Release, vec![(ty, lval.clone())]) + }; + block_data.statements.push(release_stmt); + // drop doesn't return anything, so we need no acquire. + } _ => { // Not a block ending in a Call -> ignore. - // TODO: Handle drop. } } } @@ -108,7 +118,6 @@ impl MirPass for AddValidation { block_data.statements.insert(i+1, acquire_stmt); // The source is released until the region of the borrow ends. - // FIXME: We have to check whether the source path was writable. 
let src_ty = src_lval.ty(&local_decls, tcx).to_ty(tcx); let op = match re { &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), From 511b88cdce696d04522aa27a741c4033d54e0bef Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Thu, 20 Jul 2017 15:27:03 -0700 Subject: [PATCH 055/213] only emit Suspend validation for mutable paths --- src/librustc_mir/transform/add_validation.rs | 53 +++++++++++++++----- 1 file changed, 41 insertions(+), 12 deletions(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index dacc267612ff..b79c1a2d6fdb 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -20,6 +20,33 @@ use rustc::mir::transform::{MirPass, MirSource}; pub struct AddValidation; + +fn is_lvalue_shared<'a, 'tcx, D>(lval: &Lvalue<'tcx>, local_decls: &D, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool + where D: HasLocalDecls<'tcx> +{ + use rustc::mir::Lvalue::*; + + match *lval { + Local { .. } => false, + Static(_) => true, + Projection(ref proj) => { + // If the base is shared, things stay shared + if is_lvalue_shared(&proj.base, local_decls, tcx) { + return true; + } + // A Deref projection may make things shared + match proj.elem { + ProjectionElem::Deref => { + // Computing the inside the recursion makes this quadratic. We don't expect deep paths though. + let ty = proj.base.ty(local_decls, tcx).to_ty(tcx); + !ty.is_mutable_pointer() + } + _ => false, + } + } + } +} + impl MirPass for AddValidation { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -117,18 +144,20 @@ impl MirPass for AddValidation { }; block_data.statements.insert(i+1, acquire_stmt); - // The source is released until the region of the borrow ends. 
- let src_ty = src_lval.ty(&local_decls, tcx).to_ty(tcx); - let op = match re { - &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), - &RegionKind::ReErased => bug!("AddValidation pass must be run before erasing lifetimes"), - _ => ValidationOp::Release, - }; - let release_stmt = Statement { - source_info: block_data.statements[i].source_info, - kind: StatementKind::Validate(op, vec![(src_ty, src_lval)]), - }; - block_data.statements.insert(i, release_stmt); + // The source is released until the region of the borrow ends -- but not if it is shared. + if !is_lvalue_shared(&src_lval, &local_decls, tcx) { + let src_ty = src_lval.ty(&local_decls, tcx).to_ty(tcx); + let op = match re { + &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), + &RegionKind::ReErased => bug!("AddValidation pass must be run before erasing lifetimes"), + _ => ValidationOp::Release, + }; + let release_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(op, vec![(src_ty, src_lval)]), + }; + block_data.statements.insert(i, release_stmt); + } } } } From a233afa794763846a8d970ecedf763350cc2c067 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Thu, 20 Jul 2017 15:27:26 -0700 Subject: [PATCH 056/213] respect lifetime rendering when rendering Suspend validation op --- src/librustc/mir/mod.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index c7be58c13f86..dcab476ec23d 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -836,13 +836,25 @@ pub enum StatementKind<'tcx> { Nop, } -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, PartialEq, Eq)] +#[derive(Copy, Clone, RustcEncodable, RustcDecodable, PartialEq, Eq)] pub enum ValidationOp { Acquire, Release, Suspend(CodeExtent), } +impl Debug for ValidationOp { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + use self::ValidationOp::*; + match *self { + Acquire => write!(fmt, 
"Acquire"), + Release => write!(fmt, "Release"), + // (reuse lifetime rendering policy from ppaux.) + Suspend(ref ce) => write!(fmt, "Suspend({})", ty::ReScope(*ce)), + } + } +} + impl<'tcx> Debug for Statement<'tcx> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { use self::StatementKind::*; From 60096b9e8259ba227a0a85fc1a16dca5d3fd2217 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 21 Jul 2017 12:43:09 -0700 Subject: [PATCH 057/213] when suspending, we need to specify for which lifetime to recover This matters if the lvalues that is suspended involves Deref'ing a reference -- that reference's lifetime will then not be in the type any more --- src/librustc/hir/mod.rs | 10 ++ src/librustc/ich/impls_mir.rs | 2 + src/librustc/mir/mod.rs | 43 +++++- src/librustc/mir/visit.rs | 7 +- src/librustc_mir/transform/add_validation.rs | 141 ++++++++++++------- 5 files changed, 147 insertions(+), 56 deletions(-) diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index a3a133daa09c..cc0d49c1a363 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -684,6 +684,16 @@ pub enum Mutability { MutImmutable, } +impl Mutability { + /// Return MutMutable only if both arguments are mutable. 
+ pub fn and(self, other: Self) -> Self { + match self { + MutMutable => other, + MutImmutable => MutImmutable, + } + } +} + #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] pub enum BinOp_ { /// The `+` operator (addition) diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index eb0c62a11618..dc41f981ed57 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -243,6 +243,8 @@ for mir::StatementKind<'tcx> { } } +impl_stable_hash_for!(struct mir::ValidationOperand<'tcx> { lval, ty, re, mutbl }); + impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(extent) }); impl<'a, 'gcx, 'tcx> HashStable> for mir::Lvalue<'tcx> { diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index dcab476ec23d..4655f8a9c15e 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -25,7 +25,7 @@ use ty::{self, AdtDef, ClosureSubsts, Region, Ty}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use util::ppaux; use rustc_back::slice; -use hir::InlineAsm; +use hir::{self, InlineAsm}; use std::ascii; use std::borrow::{Cow}; use std::cell::Ref; @@ -826,7 +826,7 @@ pub enum StatementKind<'tcx> { }, /// Assert the given lvalues to be valid inhabitants of their type. - Validate(ValidationOp, Vec<(Ty<'tcx>, Lvalue<'tcx>)>), + Validate(ValidationOp, Vec>), /// Mark one terminating point of an extent (i.e. static region). /// (The starting point(s) arise implicitly from borrows.) @@ -855,6 +855,28 @@ impl Debug for ValidationOp { } } +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct ValidationOperand<'tcx> { + pub lval: Lvalue<'tcx>, + pub ty: Ty<'tcx>, + pub re: Option, + pub mutbl: hir::Mutability, +} + +impl<'tcx> Debug for ValidationOperand<'tcx> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + write!(fmt, "{:?}@{:?}", self.lval, self.ty)?; + if let Some(ce) = self.re { + // (reuse lifetime rendering policy from ppaux.) 
+ write!(fmt, "/{}", ty::ReScope(ce))?; + } + if let hir::MutImmutable = self.mutbl { + write!(fmt, " (imm)")?; + } + Ok(()) + } +} + impl<'tcx> Debug for Statement<'tcx> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { use self::StatementKind::*; @@ -1505,6 +1527,21 @@ impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> { } } +impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ValidationOperand { + lval: self.lval.fold_with(folder), + ty: self.ty.fold_with(folder), + re: self.re, + mutbl: self.mutbl, + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.lval.visit_with(visitor) || self.ty.visit_with(visitor) + } +} + impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { use mir::StatementKind::*; @@ -1531,7 +1568,7 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { Validate(ref op, ref lvals) => Validate(op.clone(), - lvals.iter().map(|ty_and_lval| ty_and_lval.fold_with(folder)).collect()), + lvals.iter().map(|operand| operand.fold_with(folder)).collect()), Nop => Nop, }; diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 5284a6132396..a05007503cef 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -334,9 +334,10 @@ macro_rules! make_mir_visitor { } StatementKind::EndRegion(_) => {} StatementKind::Validate(_, ref $($mutability)* lvalues) => { - for & $($mutability)* (ref $($mutability)* ty, ref $($mutability)* lvalue) in lvalues { - self.visit_ty(ty, Lookup::Loc(location)); - self.visit_lvalue(lvalue, LvalueContext::Validate, location); + for operand in lvalues { + self.visit_lvalue(& $($mutability)* operand.lval, + LvalueContext::Validate, location); + self.visit_ty(& $($mutability)* operand.ty, Lookup::Loc(location)); } } StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. 
} => { diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index b79c1a2d6fdb..1fe16fb98f22 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -14,34 +14,67 @@ //! of MIR building, and only after this pass we think of the program has having the //! normal MIR semantics. -use rustc::ty::{TyCtxt, RegionKind}; +use rustc::ty::{self, TyCtxt, RegionKind}; +use rustc::hir; use rustc::mir::*; use rustc::mir::transform::{MirPass, MirSource}; +use rustc::middle::region::CodeExtent; pub struct AddValidation; - -fn is_lvalue_shared<'a, 'tcx, D>(lval: &Lvalue<'tcx>, local_decls: &D, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool +/// Determine the "context" of the lval: Mutability and region. +fn lval_context<'a, 'tcx, D>( + lval: &Lvalue<'tcx>, + local_decls: &D, + tcx: TyCtxt<'a, 'tcx, 'tcx> +) -> (Option, hir::Mutability) where D: HasLocalDecls<'tcx> { use rustc::mir::Lvalue::*; match *lval { - Local { .. } => false, - Static(_) => true, + Local { .. } => (None, hir::MutMutable), + Static(_) => (None, hir::MutImmutable), Projection(ref proj) => { - // If the base is shared, things stay shared - if is_lvalue_shared(&proj.base, local_decls, tcx) { - return true; - } - // A Deref projection may make things shared match proj.elem { ProjectionElem::Deref => { - // Computing the inside the recursion makes this quadratic. We don't expect deep paths though. + // Computing the inside the recursion makes this quadratic. + // We don't expect deep paths though. let ty = proj.base.ty(local_decls, tcx).to_ty(tcx); - !ty.is_mutable_pointer() + // A Deref projection may restrict the context, this depends on the type + // being deref'd. 
+ let context = match ty.sty { + ty::TyRef(re, tam) => { + let re = match re { + &RegionKind::ReScope(ce) => Some(ce), + &RegionKind::ReErased => + bug!("AddValidation pass must be run before erasing lifetimes"), + _ => None + }; + (re, tam.mutbl) + } + ty::TyRawPtr(_) => + // There is no guarantee behind even a mutable raw pointer, + // no write locks are acquired there, so we also don't want to + // release any. + (None, hir::MutImmutable), + ty::TyAdt(adt, _) if adt.is_box() => (None, hir::MutMutable), + _ => bug!("Deref on a non-pointer type {:?}", ty), + }; + // "Intersect" this restriction with proj.base. + if let (Some(_), hir::MutImmutable) = context { + // This is already as restricted as it gets, no need to even recurse + context + } else { + let base_context = lval_context(&proj.base, local_decls, tcx); + // The region of the outermost Deref is always most restrictive. + let re = context.0.or(base_context.0); + let mutbl = context.1.and(base_context.1); + (re, mutbl) + } + } - _ => false, + _ => lval_context(&proj.base, local_decls, tcx), } } } @@ -52,41 +85,49 @@ impl MirPass for AddValidation { tcx: TyCtxt<'a, 'tcx, 'tcx>, _: MirSource, mir: &mut Mir<'tcx>) { + let local_decls = mir.local_decls.clone(); // TODO: Find a way to get rid of this clone. + + /// Convert an lvalue to a validation operand. + let lval_to_operand = |lval: Lvalue<'tcx>| -> ValidationOperand<'tcx> { + let (re, mutbl) = lval_context(&lval, &local_decls, tcx); + let ty = lval.ty(&local_decls, tcx).to_ty(tcx); + ValidationOperand { lval, ty, re, mutbl } + }; + // PART 1 // Add an AcquireValid at the beginning of the start block. if mir.arg_count > 0 { let acquire_stmt = Statement { source_info: SourceInfo { scope: ARGUMENT_VISIBILITY_SCOPE, - span: mir.span, // TODO: Consider using just the span covering the function argument declaration + span: mir.span, // TODO: Consider using just the span covering the function + // argument declaration. 
}, kind: StatementKind::Validate(ValidationOp::Acquire, // Skip return value, go over all the arguments mir.local_decls.iter_enumerated().skip(1).take(mir.arg_count) - .map(|(local, local_decl)| (local_decl.ty, Lvalue::Local(local))).collect() + .map(|(local, _)| lval_to_operand(Lvalue::Local(local))).collect() ) }; mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, acquire_stmt); } // PART 2 - // Add ReleaseValid/AcquireValid around function call terminators. We don't use a visitor because - // we need to access the block that a Call jumps to. - let mut returns : Vec<(SourceInfo, Lvalue<'tcx>, BasicBlock)> = Vec::new(); // Here we collect the destinations. - let local_decls = mir.local_decls.clone(); // TODO: Find a way to get rid of this clone. + // Add ReleaseValid/AcquireValid around function call terminators. We don't use a visitor + // because we need to access the block that a Call jumps to. + let mut returns : Vec<(SourceInfo, Lvalue<'tcx>, BasicBlock)> = Vec::new(); for block_data in mir.basic_blocks_mut() { match block_data.terminator { - Some(Terminator { kind: TerminatorKind::Call { ref args, ref destination, .. }, source_info }) => { + Some(Terminator { kind: TerminatorKind::Call { ref args, ref destination, .. }, + source_info }) => { // Before the call: Release all arguments let release_stmt = Statement { source_info, kind: StatementKind::Validate(ValidationOp::Release, args.iter().filter_map(|op| { match op { - &Operand::Consume(ref lval) => { - let ty = lval.ty(&local_decls, tcx).to_ty(tcx); - Some((ty, lval.clone())) - }, + &Operand::Consume(ref lval) => + Some(lval_to_operand(lval.clone())), &Operand::Constant(..) => { None }, } }).collect()) @@ -97,13 +138,15 @@ impl MirPass for AddValidation { returns.push((source_info, destination.0.clone(), destination.1)); } } - Some(Terminator { kind: TerminatorKind::Drop { location: ref lval, .. }, source_info }) | - Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref lval, .. 
}, source_info }) => { + Some(Terminator { kind: TerminatorKind::Drop { location: ref lval, .. }, + source_info }) | + Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref lval, .. }, + source_info }) => { // Before the call: Release all arguments - let ty = lval.ty(&local_decls, tcx).to_ty(tcx); let release_stmt = Statement { source_info, - kind: StatementKind::Validate(ValidationOp::Release, vec![(ty, lval.clone())]) + kind: StatementKind::Validate(ValidationOp::Release, + vec![lval_to_operand(lval.clone())]), }; block_data.statements.push(release_stmt); // drop doesn't return anything, so we need no acquire. @@ -115,20 +158,20 @@ impl MirPass for AddValidation { } // Now we go over the returns we collected to acquire the return values. for (source_info, dest_lval, dest_block) in returns { - let ty = dest_lval.ty(&local_decls, tcx).to_ty(tcx); let acquire_stmt = Statement { source_info, - kind: StatementKind::Validate(ValidationOp::Acquire, vec![(ty, dest_lval)]) + kind: StatementKind::Validate(ValidationOp::Acquire, + vec![lval_to_operand(dest_lval)]), }; mir.basic_blocks_mut()[dest_block].statements.insert(0, acquire_stmt); } // PART 3 - // Add ReleaseValid/AcquireValid around Ref. Again an iterator does not seem very suited as - // we need to add new statements before and after each Ref. + // Add ReleaseValid/AcquireValid around Ref. Again an iterator does not seem very suited + // as we need to add new statements before and after each Ref. for block_data in mir.basic_blocks_mut() { - // We want to insert statements around Ref commands as we iterate. To this end, we iterate backwards - // using indices. + // We want to insert statements around Ref commands as we iterate. To this end, we + // iterate backwards using indices. 
for i in (0..block_data.statements.len()).rev() { let (dest_lval, re, src_lval) = match block_data.statements[i].kind { StatementKind::Assign(ref dest_lval, Rvalue::Ref(re, _, ref src_lval)) => { @@ -137,27 +180,25 @@ impl MirPass for AddValidation { _ => continue, }; // So this is a ref, and we got all the data we wanted. - let dest_ty = dest_lval.ty(&local_decls, tcx).to_ty(tcx); let acquire_stmt = Statement { source_info: block_data.statements[i].source_info, - kind: StatementKind::Validate(ValidationOp::Acquire, vec![(dest_ty, dest_lval)]), + kind: StatementKind::Validate(ValidationOp::Acquire, + vec![lval_to_operand(dest_lval)]), }; block_data.statements.insert(i+1, acquire_stmt); - // The source is released until the region of the borrow ends -- but not if it is shared. - if !is_lvalue_shared(&src_lval, &local_decls, tcx) { - let src_ty = src_lval.ty(&local_decls, tcx).to_ty(tcx); - let op = match re { - &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), - &RegionKind::ReErased => bug!("AddValidation pass must be run before erasing lifetimes"), - _ => ValidationOp::Release, - }; - let release_stmt = Statement { - source_info: block_data.statements[i].source_info, - kind: StatementKind::Validate(op, vec![(src_ty, src_lval)]), - }; - block_data.statements.insert(i, release_stmt); - } + // The source is released until the region of the borrow ends. 
+ let op = match re { + &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), + &RegionKind::ReErased => + bug!("AddValidation pass must be run before erasing lifetimes"), + _ => ValidationOp::Release, + }; + let release_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(op, vec![lval_to_operand(src_lval)]), + }; + block_data.statements.insert(i, release_stmt); } } } From e869cf2be74372db55b64eb549f4dc0e6b5a667b Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 21 Jul 2017 14:49:01 -0700 Subject: [PATCH 058/213] make ValidationOperand generic so that we can reuse it in miri with a different Lvalue type --- src/librustc/ich/impls_mir.rs | 14 +++++++++++++- src/librustc/mir/mod.rs | 11 ++++++----- src/librustc_mir/transform/add_validation.rs | 2 +- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index dc41f981ed57..bef35fdc2578 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -243,7 +243,19 @@ for mir::StatementKind<'tcx> { } } -impl_stable_hash_for!(struct mir::ValidationOperand<'tcx> { lval, ty, re, mutbl }); +impl<'a, 'gcx, 'tcx, T> HashStable> for mir::ValidationOperand<'tcx, T> + where T: HashStable> +{ + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>, + hasher: &mut StableHasher) + { + self.lval.hash_stable(hcx, hasher); + self.ty.hash_stable(hcx, hasher); + self.re.hash_stable(hcx, hasher); + self.mutbl.hash_stable(hcx, hasher); + } +} impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(extent) }); diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 4655f8a9c15e..f8261f806296 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -826,7 +826,7 @@ pub enum StatementKind<'tcx> { }, /// Assert the given lvalues to be valid inhabitants of their type. 
- Validate(ValidationOp, Vec>), + Validate(ValidationOp, Vec>>), /// Mark one terminating point of an extent (i.e. static region). /// (The starting point(s) arise implicitly from borrows.) @@ -855,15 +855,16 @@ impl Debug for ValidationOp { } } +// This is generic so that it can be reused by miri #[derive(Clone, RustcEncodable, RustcDecodable)] -pub struct ValidationOperand<'tcx> { - pub lval: Lvalue<'tcx>, +pub struct ValidationOperand<'tcx, T> { + pub lval: T, pub ty: Ty<'tcx>, pub re: Option, pub mutbl: hir::Mutability, } -impl<'tcx> Debug for ValidationOperand<'tcx> { +impl<'tcx, T: Debug> Debug for ValidationOperand<'tcx, T> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "{:?}@{:?}", self.lval, self.ty)?; if let Some(ce) = self.re { @@ -1527,7 +1528,7 @@ impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx> { +impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Lvalue<'tcx>> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { ValidationOperand { lval: self.lval.fold_with(folder), diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 1fe16fb98f22..005d793cd8b5 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -88,7 +88,7 @@ impl MirPass for AddValidation { let local_decls = mir.local_decls.clone(); // TODO: Find a way to get rid of this clone. /// Convert an lvalue to a validation operand. 
- let lval_to_operand = |lval: Lvalue<'tcx>| -> ValidationOperand<'tcx> { + let lval_to_operand = |lval: Lvalue<'tcx>| -> ValidationOperand<'tcx, Lvalue<'tcx>> { let (re, mutbl) = lval_context(&lval, &local_decls, tcx); let ty = lval.ty(&local_decls, tcx).to_ty(tcx); ValidationOperand { lval, ty, re, mutbl } From 23cd90ed41b6f1299d51da80cc6481f28a4b0f1f Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 21 Jul 2017 23:18:34 -0700 Subject: [PATCH 059/213] add -Z flag for AddValidation pass --- src/librustc/session/config.rs | 2 ++ src/librustc_mir/transform/add_validation.rs | 4 ++++ src/librustc_mir/transform/erase_regions.rs | 6 ++++++ 3 files changed, 12 insertions(+) diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 8b55eb4c099a..c5ddcb597cbb 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -1025,6 +1025,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "the directory the MIR is dumped into"), dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED], "if set, exclude the pass number when dumping MIR (used in tests)"), + mir_emit_validate: bool = (false, parse_bool, [TRACKED], + "emit Validate MIR statements, interpreted e.g. by miri"), perf_stats: bool = (false, parse_bool, [UNTRACKED], "print some performance-related statistics"), hir_stats: bool = (false, parse_bool, [UNTRACKED], diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 005d793cd8b5..e400683e8b4e 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -85,6 +85,10 @@ impl MirPass for AddValidation { tcx: TyCtxt<'a, 'tcx, 'tcx>, _: MirSource, mir: &mut Mir<'tcx>) { + if !tcx.sess.opts.debugging_opts.mir_emit_validate { + return; + } + let local_decls = mir.local_decls.clone(); // TODO: Find a way to get rid of this clone. /// Convert an lvalue to a validation operand. 
diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index 12b1c549ffec..05376ff3d526 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -77,6 +77,12 @@ impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { block: BasicBlock, statement: &mut Statement<'tcx>, location: Location) { + if !self.tcx.sess.opts.debugging_opts.mir_emit_validate { + if let StatementKind::EndRegion(_) = statement.kind { + statement.kind = StatementKind::Nop; + } + } + self.in_validation_statement = match statement.kind { StatementKind::Validate(..) => true, _ => false, From b6816b2b56f5e1044f100e0ab7da0d9540d8f9cf Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sat, 22 Jul 2017 01:04:16 -0700 Subject: [PATCH 060/213] please the tidy --- src/librustc/ich/impls_mir.rs | 3 ++- src/librustc/mir/mod.rs | 3 ++- src/librustc_mir/transform/add_validation.rs | 4 ++-- src/librustc_mir/transform/clean_end_regions.rs | 5 ++++- src/librustc_mir/transform/erase_regions.rs | 4 ++-- 5 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index bef35fdc2578..c20864183f47 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -243,7 +243,8 @@ for mir::StatementKind<'tcx> { } } -impl<'a, 'gcx, 'tcx, T> HashStable> for mir::ValidationOperand<'tcx, T> +impl<'a, 'gcx, 'tcx, T> HashStable> + for mir::ValidationOperand<'tcx, T> where T: HashStable> { fn hash_stable(&self, diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index f8261f806296..3ee86dbdc846 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -1596,7 +1596,8 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { // trait with a `fn visit_extent`. 
EndRegion(ref _extent) => false, - Validate(ref _op, ref lvalues) => lvalues.iter().any(|ty_and_lvalue| ty_and_lvalue.visit_with(visitor)), + Validate(ref _op, ref lvalues) => + lvalues.iter().any(|ty_and_lvalue| ty_and_lvalue.visit_with(visitor)), Nop => false, } diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index e400683e8b4e..d91db41d20d3 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -89,7 +89,7 @@ impl MirPass for AddValidation { return; } - let local_decls = mir.local_decls.clone(); // TODO: Find a way to get rid of this clone. + let local_decls = mir.local_decls.clone(); // FIXME: Find a way to get rid of this clone. /// Convert an lvalue to a validation operand. let lval_to_operand = |lval: Lvalue<'tcx>| -> ValidationOperand<'tcx, Lvalue<'tcx>> { @@ -104,7 +104,7 @@ impl MirPass for AddValidation { let acquire_stmt = Statement { source_info: SourceInfo { scope: ARGUMENT_VISIBILITY_SCOPE, - span: mir.span, // TODO: Consider using just the span covering the function + span: mir.span, // FIXME: Consider using just the span covering the function // argument declaration. 
}, kind: StatementKind::Validate(ValidationOp::Acquire, diff --git a/src/librustc_mir/transform/clean_end_regions.rs b/src/librustc_mir/transform/clean_end_regions.rs index 28311a5e68cc..1a31bf975307 100644 --- a/src/librustc_mir/transform/clean_end_regions.rs +++ b/src/librustc_mir/transform/clean_end_regions.rs @@ -43,7 +43,10 @@ impl MirPass for CleanEndRegions { _tcx: TyCtxt<'a, 'tcx, 'tcx>, _source: MirSource, mir: &mut Mir<'tcx>) { - let mut gather = GatherBorrowedRegions { seen_regions: FxHashSet(), in_validation_statement: false }; + let mut gather = GatherBorrowedRegions { + seen_regions: FxHashSet(), + in_validation_statement: false + }; gather.visit_mir(mir); let mut delete = DeleteTrivialEndRegions { seen_regions: &mut gather.seen_regions }; diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index 05376ff3d526..f01d71fde264 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -11,8 +11,8 @@ //! This pass erases all early-bound regions from the types occuring in the MIR. //! We want to do this once just before trans, so trans does not have to take //! care erasing regions all over the place. -//! NOTE: We do NOT erase regions of statements that are relevant for "types-as-contracts"-validation, -//! namely, AcquireValid, ReleaseValid, and EndRegion. +//! NOTE: We do NOT erase regions of statements that are relevant for +//! "types-as-contracts"-validation, namely, AcquireValid, ReleaseValid, and EndRegion. 
use rustc::ty::subst::Substs; use rustc::ty::{Ty, TyCtxt, ClosureSubsts}; From 04f962adc39c2632da8f712b2cd38eb6109ae5a1 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 24 Jul 2017 19:19:39 -0700 Subject: [PATCH 061/213] after a Ref, only acquire the Deref'd destination --- src/librustc_mir/transform/add_validation.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index d91db41d20d3..4edcab738c37 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -184,6 +184,10 @@ impl MirPass for AddValidation { _ => continue, }; // So this is a ref, and we got all the data we wanted. + // Do an acquire of the result -- but only what it points to, so add a Deref + // projection. + let dest_lval = Projection { base: dest_lval, elem: ProjectionElem::Deref }; + let dest_lval = Lvalue::Projection(Box::new(dest_lval)); let acquire_stmt = Statement { source_info: block_data.statements[i].source_info, kind: StatementKind::Validate(ValidationOp::Acquire, From b934506e681e00b803ca886122062916b41e0fbe Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 25 Jul 2017 16:44:49 -0700 Subject: [PATCH 062/213] Reorder passes so that AddValidation can run after ElaborateDrops --- src/librustc_driver/driver.rs | 23 +++++++++++-------- .../transform/clean_end_regions.rs | 21 +++++------------ 2 files changed, 19 insertions(+), 25 deletions(-) diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 68e6b0f50d1d..2b667d83e35e 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -925,10 +925,6 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, let mut passes = Passes::new(); passes.push_hook(mir::transform::dump_mir::DumpMir); - // Insert AcquireValid and ReleaseValid calls. Conceptually, this - // pass is actually part of MIR building. 
- passes.push_pass(MIR_CONST, mir::transform::add_validation::AddValidation); - // Remove all `EndRegion` statements that are not involved in borrows. passes.push_pass(MIR_CONST, mir::transform::clean_end_regions::CleanEndRegions); @@ -937,6 +933,8 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, passes.push_pass(MIR_CONST, mir::transform::type_check::TypeckMir); passes.push_pass(MIR_CONST, mir::transform::rustc_peek::SanityCheck); + // We compute "constant qualifications" betwen MIR_CONST and MIR_VALIDATED. + // What we need to run borrowck etc. passes.push_pass(MIR_VALIDATED, mir::transform::qualify_consts::QualifyAndPromoteConstants); passes.push_pass(MIR_VALIDATED, @@ -944,18 +942,23 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, passes.push_pass(MIR_VALIDATED, mir::transform::simplify::SimplifyCfg::new("qualify-consts")); passes.push_pass(MIR_VALIDATED, mir::transform::nll::NLL); - // Optimizations begin. - passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads); - passes.push_pass(MIR_OPTIMIZED, mir::transform::simplify::SimplifyCfg::new("no-landing-pads")); + // borrowck runs between MIR_VALIDATED and MIR_OPTIMIZED. - // From here on out, regions are gone. - passes.push_pass(MIR_OPTIMIZED, mir::transform::erase_regions::EraseRegions); + // These next passes must be executed together + passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads); passes.push_pass(MIR_OPTIMIZED, mir::transform::add_call_guards::AddCallGuards); passes.push_pass(MIR_OPTIMIZED, mir::transform::elaborate_drops::ElaborateDrops); passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads); passes.push_pass(MIR_OPTIMIZED, mir::transform::simplify::SimplifyCfg::new("elaborate-drops")); - // No lifetime analysis based on borrowing can be done from here on out. + + // AddValidation needs to run after ElaborateDrops and before EraseRegions. 
+ passes.push_pass(MIR_OPTIMIZED, mir::transform::add_validation::AddValidation); + + // From here on out, regions are gone. + passes.push_pass(MIR_OPTIMIZED, mir::transform::erase_regions::EraseRegions); + + // Optimizations begin. passes.push_pass(MIR_OPTIMIZED, mir::transform::inline::Inline); passes.push_pass(MIR_OPTIMIZED, mir::transform::instcombine::InstCombine); passes.push_pass(MIR_OPTIMIZED, mir::transform::deaggregator::Deaggregator); diff --git a/src/librustc_mir/transform/clean_end_regions.rs b/src/librustc_mir/transform/clean_end_regions.rs index 1a31bf975307..d7ec58384a46 100644 --- a/src/librustc_mir/transform/clean_end_regions.rs +++ b/src/librustc_mir/transform/clean_end_regions.rs @@ -31,7 +31,6 @@ pub struct CleanEndRegions; struct GatherBorrowedRegions { seen_regions: FxHashSet, - in_validation_statement: bool, } struct DeleteTrivialEndRegions<'a> { @@ -44,8 +43,7 @@ impl MirPass for CleanEndRegions { _source: MirSource, mir: &mut Mir<'tcx>) { let mut gather = GatherBorrowedRegions { - seen_regions: FxHashSet(), - in_validation_statement: false + seen_regions: FxHashSet() }; gather.visit_mir(mir); @@ -71,22 +69,15 @@ impl<'tcx> Visitor<'tcx> for GatherBorrowedRegions { block: BasicBlock, statement: &Statement<'tcx>, location: Location) { - self.in_validation_statement = match statement.kind { - StatementKind::Validate(..) 
=> true, - _ => false, - }; self.super_statement(block, statement, location); - self.in_validation_statement = false; } fn visit_ty(&mut self, ty: &Ty<'tcx>, _: Lookup) { - // Gather regions that occur in types inside AcquireValid/ReleaseValid statements - if self.in_validation_statement { - for re in ty.walk().flat_map(|t| t.regions()) { - match *re { - RegionKind::ReScope(ce) => { self.seen_regions.insert(ce); } - _ => {}, - } + // Gather regions that occur in types + for re in ty.walk().flat_map(|t| t.regions()) { + match *re { + RegionKind::ReScope(ce) => { self.seen_regions.insert(ce); } + _ => {}, } } self.super_ty(ty); From 7ec50dfee3f2c2562586a59d80d3d1e9d2d0c0cd Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Wed, 26 Jul 2017 20:51:36 -0700 Subject: [PATCH 063/213] also release/validate around non-Misc casts --- src/librustc_mir/transform/add_validation.rs | 101 +++++++++++++------ 1 file changed, 71 insertions(+), 30 deletions(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 4edcab738c37..70ef08cf2d18 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -171,42 +171,83 @@ impl MirPass for AddValidation { } // PART 3 - // Add ReleaseValid/AcquireValid around Ref. Again an iterator does not seem very suited + // Add ReleaseValid/AcquireValid around Ref and Cast. Again an iterator does not seem very + // suited // as we need to add new statements before and after each Ref. for block_data in mir.basic_blocks_mut() { // We want to insert statements around Ref commands as we iterate. To this end, we // iterate backwards using indices. 
for i in (0..block_data.statements.len()).rev() { - let (dest_lval, re, src_lval) = match block_data.statements[i].kind { - StatementKind::Assign(ref dest_lval, Rvalue::Ref(re, _, ref src_lval)) => { - (dest_lval.clone(), re, src_lval.clone()) - }, - _ => continue, - }; - // So this is a ref, and we got all the data we wanted. - // Do an acquire of the result -- but only what it points to, so add a Deref - // projection. - let dest_lval = Projection { base: dest_lval, elem: ProjectionElem::Deref }; - let dest_lval = Lvalue::Projection(Box::new(dest_lval)); - let acquire_stmt = Statement { - source_info: block_data.statements[i].source_info, - kind: StatementKind::Validate(ValidationOp::Acquire, - vec![lval_to_operand(dest_lval)]), - }; - block_data.statements.insert(i+1, acquire_stmt); + match block_data.statements[i].kind { + // When the borrow of this ref expires, we need to recover validation. + StatementKind::Assign(_, Rvalue::Ref(_, _, _)) => { + // Due to a lack of NLL; we can't capture anything directly here. + // Instead, we have to re-match and clone there. + let (dest_lval, re, src_lval) = match block_data.statements[i].kind { + StatementKind::Assign(ref dest_lval, + Rvalue::Ref(re, _, ref src_lval)) => { + (dest_lval.clone(), re, src_lval.clone()) + }, + _ => bug!("We already matched this."), + }; + // So this is a ref, and we got all the data we wanted. + // Do an acquire of the result -- but only what it points to, so add a Deref + // projection. + let dest_lval = Projection { base: dest_lval, elem: ProjectionElem::Deref }; + let dest_lval = Lvalue::Projection(Box::new(dest_lval)); + let acquire_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(ValidationOp::Acquire, + vec![lval_to_operand(dest_lval)]), + }; + block_data.statements.insert(i+1, acquire_stmt); - // The source is released until the region of the borrow ends. 
- let op = match re { - &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), - &RegionKind::ReErased => - bug!("AddValidation pass must be run before erasing lifetimes"), - _ => ValidationOp::Release, - }; - let release_stmt = Statement { - source_info: block_data.statements[i].source_info, - kind: StatementKind::Validate(op, vec![lval_to_operand(src_lval)]), - }; - block_data.statements.insert(i, release_stmt); + // The source is released until the region of the borrow ends. + let op = match re { + &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), + &RegionKind::ReErased => + bug!("AddValidation pass must be run before erasing lifetimes"), + _ => ValidationOp::Release, + }; + let release_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(op, vec![lval_to_operand(src_lval)]), + }; + block_data.statements.insert(i, release_stmt); + } + // Casts can change what validation does (e.g. unsizing) + StatementKind::Assign(_, Rvalue::Cast(kind, Operand::Consume(_), _)) + if kind != CastKind::Misc => + { + // Due to a lack of NLL; we can't capture anything directly here. + // Instead, we have to re-match and clone there. 
+ let (dest_lval, src_lval) = match block_data.statements[i].kind { + StatementKind::Assign(ref dest_lval, + Rvalue::Cast(_, Operand::Consume(ref src_lval), _)) => + { + (dest_lval.clone(), src_lval.clone()) + }, + _ => bug!("We already matched this."), + }; + + // Acquire of the result + let acquire_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(ValidationOp::Acquire, + vec![lval_to_operand(dest_lval)]), + }; + block_data.statements.insert(i+1, acquire_stmt); + + // Release of the input + let release_stmt = Statement { + source_info: block_data.statements[i].source_info, + kind: StatementKind::Validate(ValidationOp::Release, + vec![lval_to_operand(src_lval)]), + }; + block_data.statements.insert(i, release_stmt); + } + _ => {}, + } } } } From 57958d1a04d12dbd3ea51904c2f38fbc3b40d246 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 28 Jul 2017 15:16:17 -0700 Subject: [PATCH 064/213] Add tests for emitting validation statements --- src/test/mir-opt/README.md | 11 ++------ src/test/mir-opt/validate_1.rs | 48 ++++++++++++++++++++++++++++++++++ src/test/mir-opt/validate_2.rs | 26 ++++++++++++++++++ src/test/mir-opt/validate_3.rs | 45 +++++++++++++++++++++++++++++++ 4 files changed, 121 insertions(+), 9 deletions(-) create mode 100644 src/test/mir-opt/validate_1.rs create mode 100644 src/test/mir-opt/validate_2.rs create mode 100644 src/test/mir-opt/validate_3.rs diff --git a/src/test/mir-opt/README.md b/src/test/mir-opt/README.md index 28a124e3c61c..d999ff975516 100644 --- a/src/test/mir-opt/README.md +++ b/src/test/mir-opt/README.md @@ -57,13 +57,6 @@ the lines being too long. compiletest handles dumping the MIR before and after every pass for you. The test writer only has to specify the file names of the dumped files (not the -full path to the file) and what lines to expect. I added an option to rustc +full path to the file) and what lines to expect. 
There is an option to rustc that tells it to dump the mir into some directly (rather then always dumping to -the current directory). - -Lines match ignoring whitespace, and the prefix "//" is removed of course. - -It also currently strips trailing comments -- partly because the full file path -in "scope comments" is unpredictable and partly because tidy complains about -the lines being too long. - +the current directory). diff --git a/src/test/mir-opt/validate_1.rs b/src/test/mir-opt/validate_1.rs new file mode 100644 index 000000000000..0059fc930620 --- /dev/null +++ b/src/test/mir-opt/validate_1.rs @@ -0,0 +1,48 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// compile-flags: -Z verbose -Z mir-emit-validate + +fn foo(_x: &mut i32) {} + +fn main() { + let mut x = 0; + foo(&mut x); +} + +// END RUST SOURCE +// START rustc.node4.EraseRegions.after.mir +// fn foo(_1: &ReErased mut i32) -> () { +// bb0: { +// Validate(Acquire, [_1@&ReFree(DefId { krate: CrateNum(0), node: DefIndex(3) => validate_1/8cd878b::foo[0] }, BrAnon(0)) mut i32]); +// return; +// } +// } +// END rustc.node4.EraseRegions.after.mir +// START rustc.node11.EraseRegions.after.mir +// fn main() -> () { +// bb0: { +// Validate(Suspend(ReScope(Misc(NodeId(20)))), [_1@i32]); +// _4 = &ReErased mut _1; +// Validate(Acquire, [(*_4)@i32/ReScope(Misc(NodeId(20)))]); +// Validate(Suspend(ReScope(Misc(NodeId(20)))), [(*_4)@i32/ReScope(Misc(NodeId(20)))]); +// _3 = &ReErased mut (*_4); +// Validate(Acquire, [(*_3)@i32/ReScope(Misc(NodeId(20)))]); +// Validate(Release, [_3@&ReScope(Misc(NodeId(20))) mut i32]); +// _2 = const foo(_3) -> bb1; +// } +// +// bb1: { +// Validate(Acquire, [_2@()]); +// 
EndRegion(ReScope(Misc(NodeId(20)))); +// return; +// } +// } +// END rustc.node11.EraseRegions.after.mir diff --git a/src/test/mir-opt/validate_2.rs b/src/test/mir-opt/validate_2.rs new file mode 100644 index 000000000000..f1562c8c34ce --- /dev/null +++ b/src/test/mir-opt/validate_2.rs @@ -0,0 +1,26 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// compile-flags: -Z verbose -Z mir-emit-validate + +fn main() { + let _x : Box<[i32]> = Box::new([1, 2, 3]); +} + +// END RUST SOURCE +// START rustc.node4.EraseRegions.after.mir +// fn main() -> () { +// bb1: { +// Validate(Release, [_2@std::boxed::Box<[i32; 3]>]); +// _1 = _2 as std::boxed::Box<[i32]> (Unsize); +// Validate(Acquire, [_1@std::boxed::Box<[i32]>]); +// } +// } +// END rustc.node4.EraseRegions.after.mir diff --git a/src/test/mir-opt/validate_3.rs b/src/test/mir-opt/validate_3.rs new file mode 100644 index 000000000000..6990167b4e1c --- /dev/null +++ b/src/test/mir-opt/validate_3.rs @@ -0,0 +1,45 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// compile-flags: -Z verbose -Z mir-emit-validate + +struct Test { + x: i32 +} + +fn foo(_x: &i32) {} + +fn main() { + let t = Test { x: 0 }; + let t = &t; + foo(&t.x); +} + +// END RUST SOURCE +// START rustc.node16.EraseRegions.after.mir +// fn main() -> () { +// let mut _5: &ReErased i32; +// bb0: { +// Validate(Suspend(ReScope(Misc(NodeId(31)))), [((*_2).0: i32)@i32/ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 1 })) (imm)]); +// _5 = &ReErased ((*_2).0: i32); +// Validate(Acquire, [(*_5)@i32/ReScope(Misc(NodeId(31))) (imm)]); +// Validate(Suspend(ReScope(Misc(NodeId(31)))), [(*_5)@i32/ReScope(Misc(NodeId(31))) (imm)]); +// _4 = &ReErased (*_5); +// Validate(Acquire, [(*_4)@i32/ReScope(Misc(NodeId(31))) (imm)]); +// Validate(Release, [_4@&ReScope(Misc(NodeId(31))) i32]); +// _3 = const foo(_4) -> bb1; +// } +// bb1: { +// EndRegion(ReScope(Misc(NodeId(31)))); +// EndRegion(ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 1 }))); +// return; +// } +// } +// END rustc.node16.EraseRegions.after.mir From 29ed317ecb88458e74cd78003205368ba7d04cfb Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 28 Jul 2017 15:42:11 -0700 Subject: [PATCH 065/213] silence tidy --- src/test/mir-opt/validate_1.rs | 1 + src/test/mir-opt/validate_2.rs | 1 + src/test/mir-opt/validate_3.rs | 1 + 3 files changed, 3 insertions(+) diff --git a/src/test/mir-opt/validate_1.rs b/src/test/mir-opt/validate_1.rs index 0059fc930620..868d23b03c21 100644 --- a/src/test/mir-opt/validate_1.rs +++ b/src/test/mir-opt/validate_1.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+// ignore-tidy-linelength // compile-flags: -Z verbose -Z mir-emit-validate fn foo(_x: &mut i32) {} diff --git a/src/test/mir-opt/validate_2.rs b/src/test/mir-opt/validate_2.rs index f1562c8c34ce..a219c5fc78eb 100644 --- a/src/test/mir-opt/validate_2.rs +++ b/src/test/mir-opt/validate_2.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-tidy-linelength // compile-flags: -Z verbose -Z mir-emit-validate fn main() { diff --git a/src/test/mir-opt/validate_3.rs b/src/test/mir-opt/validate_3.rs index 6990167b4e1c..78957115f505 100644 --- a/src/test/mir-opt/validate_3.rs +++ b/src/test/mir-opt/validate_3.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// ignore-tidy-linelength // compile-flags: -Z verbose -Z mir-emit-validate struct Test { From a6993d6469f73adab1bc2a73e148d1caad0ab257 Mon Sep 17 00:00:00 2001 From: Vadim Petrochenkov Date: Sun, 30 Jul 2017 02:28:30 +0300 Subject: [PATCH 066/213] resolve: Fix instability in import suggestions --- src/librustc_resolve/lib.rs | 20 +++++++-- src/libsyntax_pos/symbol.rs | 2 +- src/test/compile-fail/issue-35675.rs | 67 ---------------------------- src/test/ui/issue-35675.rs | 16 +++++++ src/test/ui/issue-35675.stderr | 24 ++++++++-- 5 files changed, 55 insertions(+), 74 deletions(-) delete mode 100644 src/test/compile-fail/issue-35675.rs diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index 88013b45a05a..a907b5399acc 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -546,7 +546,7 @@ impl<'a> PathSource<'a> { } } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum Namespace { TypeNS, ValueNS, @@ -898,6 +898,19 @@ impl<'a> ModuleData<'a> { } } + fn for_each_child_stable)>(&self, mut f: F) { + let resolutions = self.resolutions.borrow(); + let mut 
resolutions = resolutions.iter().map(|(&(ident, ns), &resolution)| { + // Pre-compute keys for sorting + (ident.name.as_str(), ns, ident, resolution) + }) + .collect::>(); + resolutions.sort_unstable_by_key(|&(str, ns, ..)| (str, ns)); + for &(_, ns, ident, resolution) in resolutions.iter() { + resolution.borrow().binding.map(|binding| f(ident, ns, binding)); + } + } + fn def(&self) -> Option { match self.kind { ModuleKind::Def(def, _) => Some(def), @@ -3351,8 +3364,9 @@ impl<'a> Resolver<'a> { in_module_is_extern)) = worklist.pop() { self.populate_module_if_necessary(in_module); - in_module.for_each_child(|ident, ns, name_binding| { - + // We have to visit module children in deterministic order to avoid + // instabilities in reported imports (#43552). + in_module.for_each_child_stable(|ident, ns, name_binding| { // avoid imports entirely if name_binding.is_import() && !name_binding.is_extern_crate() { return; } // avoid non-importable candidates as well diff --git a/src/libsyntax_pos/symbol.rs b/src/libsyntax_pos/symbol.rs index debac70545a9..e49f1f28e5f1 100644 --- a/src/libsyntax_pos/symbol.rs +++ b/src/libsyntax_pos/symbol.rs @@ -326,7 +326,7 @@ fn with_interner T>(f: F) -> T { /// destroyed. In particular, they must not access string contents. This can /// be fixed in the future by just leaking all strings until thread death /// somehow. -#[derive(Clone, Hash, PartialOrd, Eq, Ord)] +#[derive(Clone, Copy, Hash, PartialOrd, Eq, Ord)] pub struct InternedString { string: &'static str, } diff --git a/src/test/compile-fail/issue-35675.rs b/src/test/compile-fail/issue-35675.rs deleted file mode 100644 index c09e56cbc5bc..000000000000 --- a/src/test/compile-fail/issue-35675.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -// these two HELPs are actually in a new line between this line and the `enum Fruit` line -enum Fruit { //~ HELP possible candidate is found in another module, you can import it into scope - //~^ HELP possible candidate is found in another module, you can import it into scope - Apple(i64), - Orange(i64), -} - -fn should_return_fruit() -> Apple { - //~^ ERROR cannot find type `Apple` in this scope - //~| NOTE not found in this scope - //~| HELP you can try using the variant's enum - Apple(5) - //~^ ERROR cannot find function `Apple` in this scope - //~| NOTE not found in this scope -} - -fn should_return_fruit_too() -> Fruit::Apple { - //~^ ERROR expected type, found variant `Fruit::Apple` - //~| HELP you can try using the variant's enum - //~| NOTE not a type - Apple(5) - //~^ ERROR cannot find function `Apple` in this scope - //~| NOTE not found in this scope -} - -fn foo() -> Ok { - //~^ ERROR expected type, found variant `Ok` - //~| NOTE not a type - //~| HELP there is an enum variant - //~| HELP there is an enum variant - Ok(()) -} - -fn bar() -> Variant3 { - //~^ ERROR cannot find type `Variant3` in this scope - //~| HELP you can try using the variant's enum - //~| NOTE not found in this scope -} - -fn qux() -> Some { - //~^ ERROR expected type, found variant `Some` - //~| NOTE not a type - //~| HELP there is an enum variant - //~| HELP there is an enum variant - Some(1) -} - -fn main() {} - -mod x { - enum Enum { - Variant1, - Variant2(), - Variant3(usize), - Variant4 {}, - } -} diff --git a/src/test/ui/issue-35675.rs b/src/test/ui/issue-35675.rs index 391e1f2db5c0..001c1f2eddca 100644 --- a/src/test/ui/issue-35675.rs +++ b/src/test/ui/issue-35675.rs @@ -33,11 +33,27 @@ fn should_return_fruit_too() -> Fruit::Apple { //~| NOTE not found in this scope } +fn foo() -> Ok { + //~^ ERROR expected type, found variant `Ok` + //~| NOTE not a type + //~| HELP there is an enum 
variant + //~| HELP there is an enum variant + Ok(()) +} + fn bar() -> Variant3 { //~^ ERROR cannot find type `Variant3` in this scope //~| NOTE not found in this scope } +fn qux() -> Some { + //~^ ERROR expected type, found variant `Some` + //~| NOTE not a type + //~| HELP there is an enum variant + //~| HELP there is an enum variant + Some(1) +} + fn main() {} mod x { diff --git a/src/test/ui/issue-35675.stderr b/src/test/ui/issue-35675.stderr index c2c10724646e..ed330f47208e 100644 --- a/src/test/ui/issue-35675.stderr +++ b/src/test/ui/issue-35675.stderr @@ -38,14 +38,32 @@ help: possible candidate is found in another module, you can import it into scop 12 | use Fruit::Apple; | -error[E0412]: cannot find type `Variant3` in this scope +error[E0573]: expected type, found variant `Ok` --> $DIR/issue-35675.rs:36:13 | -36 | fn bar() -> Variant3 { +36 | fn foo() -> Ok { + | ^^ not a type + | + = help: there is an enum variant `std::prelude::v1::Ok`, try using `std::prelude::v1`? + = help: there is an enum variant `std::result::Result::Ok`, try using `std::result::Result`? + +error[E0412]: cannot find type `Variant3` in this scope + --> $DIR/issue-35675.rs:44:13 + | +44 | fn bar() -> Variant3 { | ^^^^^^^^ | | | not found in this scope | help: you can try using the variant's enum: `x::Enum` -error: aborting due to 5 previous errors +error[E0573]: expected type, found variant `Some` + --> $DIR/issue-35675.rs:49:13 + | +49 | fn qux() -> Some { + | ^^^^ not a type + | + = help: there is an enum variant `std::prelude::v1::Option::Some`, try using `std::prelude::v1::Option`? + = help: there is an enum variant `std::prelude::v1::Some`, try using `std::prelude::v1`? 
+ +error: aborting due to 7 previous errors From 39ef545f109e40709c044ce5a836025ba0613a19 Mon Sep 17 00:00:00 2001 From: Daiki Mizukami Date: Sun, 30 Jul 2017 18:17:22 +0900 Subject: [PATCH 067/213] librustc_driver: Remove -Z option from usage on stable compiler --- src/librustc_driver/lib.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index e139f81416e3..d6b1eb86937b 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -795,7 +795,12 @@ fn usage(verbose: bool, include_unstable_options: bool) { (option.apply)(&mut options); } let message = format!("Usage: rustc [OPTIONS] INPUT"); - let extra_help = if verbose { + let nightly_help = if nightly_options::is_nightly_build() { + "\n -Z help Print internal options for debugging rustc" + } else { + "" + }; + let verbose_help = if verbose { "" } else { "\n --help -v Print the full set of options rustc accepts" @@ -803,11 +808,10 @@ fn usage(verbose: bool, include_unstable_options: bool) { println!("{}\nAdditional help: -C help Print codegen options -W help \ - Print 'lint' options and default settings - -Z help Print internal \ - options for debugging rustc{}\n", + Print 'lint' options and default settings{}{}\n", options.usage(&message), - extra_help); + nightly_help, + verbose_help); } fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) { From 302e51fdc9e921eda6b52eff00dc102ea2e27514 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sun, 30 Jul 2017 15:11:15 +0200 Subject: [PATCH 068/213] Add colors for constants and unions --- src/librustdoc/html/static/rustdoc.css | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/librustdoc/html/static/rustdoc.css b/src/librustdoc/html/static/rustdoc.css index 567c8fb97180..9d237bba1bcd 100644 --- a/src/librustdoc/html/static/rustdoc.css +++ b/src/librustdoc/html/static/rustdoc.css @@ -438,6 +438,8 @@ a { .content span.struct, .content a.struct, 
.block a.current.struct { color: #df3600; } .content span.type, .content a.type, .block a.current.type { color: #e57300; } .content span.macro, .content a.macro, .block a.current.macro { color: #068000; } +.content span.union, .content a.union, .block a.current.union { color: #c0c74f; } +.content span.constant, .content a.constant, .block a.current.constant { color: #c7944f; } .block a.current.crate { font-weight: 500; } .search-input { From 851c77088db304f0fba5318db39b3b31521aa274 Mon Sep 17 00:00:00 2001 From: Tobias Schottdorf Date: Fri, 21 Jul 2017 19:29:43 -0400 Subject: [PATCH 069/213] default binding modes: add pat_binding_modes This PR kicks off the implementation of the [default binding modes RFC][1] by introducing the `pat_binding_modes` typeck table mentioned in the [mentoring instructions][2]. `pat_binding_modes` is populated in `librustc_typeck/check/_match.rs` and used wherever the HIR would be scraped prior to this PR. Unfortunately, one blemish, namely a two callers to `contains_explicit_ref_binding`, remains. This will likely have to be removed when the second part of [1], the `pat_adjustments` table, is tackled. Appropriate comments have been added. See #42640. 
[1]: https://github.com/rust-lang/rfcs/pull/2005 [2]: https://github.com/rust-lang/rust/issues/42640#issuecomment-313535089 --- src/librustc/hir/lowering.rs | 22 ++++++++----- src/librustc/hir/mod.rs | 27 ++++++++++++--- src/librustc/hir/pat_util.rs | 40 +++++++++++++---------- src/librustc/hir/print.rs | 12 ++++--- src/librustc/ich/impls_hir.rs | 8 +++-- src/librustc/ich/impls_ty.rs | 2 ++ src/librustc/middle/expr_use_visitor.rs | 28 +++++++++------- src/librustc/middle/mem_categorization.rs | 34 +++++++++++-------- src/librustc/middle/region.rs | 26 ++++++++++++++- src/librustc/ty/binding.rs | 35 ++++++++++++++++++++ src/librustc/ty/context.rs | 5 +++ src/librustc/ty/mod.rs | 4 +++ src/librustc_borrowck/borrowck/mod.rs | 11 ++++--- src/librustc_const_eval/check_match.rs | 26 +++++++++++---- src/librustc_const_eval/pattern.rs | 20 +++++++----- src/librustc_lint/unused.rs | 8 +++-- src/librustc_resolve/lib.rs | 3 +- src/librustc_typeck/check/_match.rs | 25 ++++++++++---- src/librustc_typeck/check/mod.rs | 4 ++- src/librustc_typeck/check/regionck.rs | 10 ++++-- src/librustc_typeck/check/writeback.rs | 9 +++++ src/libsyntax/parse/mod.rs | 15 +++++---- 22 files changed, 268 insertions(+), 106 deletions(-) create mode 100644 src/librustc/ty/binding.rs diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs index 3ae3671b5934..421a81c0d234 100644 --- a/src/librustc/hir/lowering.rs +++ b/src/librustc/hir/lowering.rs @@ -2191,7 +2191,7 @@ impl<'a> LoweringContext<'a> { let next_ident = self.str_to_ident("__next"); let next_pat = self.pat_ident_binding_mode(e.span, next_ident, - hir::BindByValue(hir::MutMutable)); + hir::BindingAnnotation::Mutable); // `::std::option::Option::Some(val) => next = val` let pat_arm = { @@ -2215,8 +2215,9 @@ impl<'a> LoweringContext<'a> { }; // `mut iter` - let iter_pat = self.pat_ident_binding_mode(e.span, iter, - hir::BindByValue(hir::MutMutable)); + let iter_pat = self.pat_ident_binding_mode(e.span, + iter, + 
hir::BindingAnnotation::Mutable); // `match ::std::iter::Iterator::next(&mut iter) { ... }` let match_expr = { @@ -2503,10 +2504,13 @@ impl<'a> LoweringContext<'a> { } } - fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingMode { + fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingAnnotation { match *b { - BindingMode::ByRef(m) => hir::BindByRef(self.lower_mutability(m)), - BindingMode::ByValue(m) => hir::BindByValue(self.lower_mutability(m)), + BindingMode::ByValue(Mutability::Immutable) => + hir::BindingAnnotation::Unannotated, + BindingMode::ByRef(Mutability::Immutable) => hir::BindingAnnotation::Ref, + BindingMode::ByValue(Mutability::Mutable) => hir::BindingAnnotation::Mutable, + BindingMode::ByRef(Mutability::Mutable) => hir::BindingAnnotation::RefMut, } } @@ -2647,7 +2651,7 @@ impl<'a> LoweringContext<'a> { fn stmt_let(&mut self, sp: Span, mutbl: bool, ident: Name, ex: P) -> (hir::Stmt, NodeId) { let pat = if mutbl { - self.pat_ident_binding_mode(sp, ident, hir::BindByValue(hir::MutMutable)) + self.pat_ident_binding_mode(sp, ident, hir::BindingAnnotation::Mutable) } else { self.pat_ident(sp, ident) }; @@ -2703,10 +2707,10 @@ impl<'a> LoweringContext<'a> { } fn pat_ident(&mut self, span: Span, name: Name) -> P { - self.pat_ident_binding_mode(span, name, hir::BindByValue(hir::MutImmutable)) + self.pat_ident_binding_mode(span, name, hir::BindingAnnotation::Unannotated) } - fn pat_ident_binding_mode(&mut self, span: Span, name: Name, bm: hir::BindingMode) + fn pat_ident_binding_mode(&mut self, span: Span, name: Name, bm: hir::BindingAnnotation) -> P { let id = self.next_id(); let parent_def = self.parent_def.unwrap(); diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index a3a133daa09c..7f1d1480d46a 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -10,7 +10,6 @@ // The Rust HIR. 
-pub use self::BindingMode::*; pub use self::BinOp_::*; pub use self::BlockCheckMode::*; pub use self::CaptureClause::*; @@ -628,10 +627,28 @@ pub struct FieldPat { pub is_shorthand: bool, } +/// Explicit binding annotations given in the HIR for a binding. Note +/// that this is not the final binding *mode* that we infer after type +/// inference. #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum BindingMode { - BindByRef(Mutability), - BindByValue(Mutability), +pub enum BindingAnnotation { + /// No binding annotation given: this means that the final binding mode + /// will depend on whether we have skipped through a `&` reference + /// when matching. For example, the `x` in `Some(x)` will have binding + /// mode `None`; if you do `let Some(x) = &Some(22)`, it will + /// ultimately be inferred to be by-reference. + /// + /// Note that implicit reference skipping is not implemented yet (#42640). + Unannotated, + + /// Annotated with `mut x` -- could be either ref or not, similar to `None`. + Mutable, + + /// Annotated as `ref`, like `ref x` + Ref, + + /// Annotated as `ref mut x`. + RefMut, } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] @@ -647,7 +664,7 @@ pub enum PatKind { /// A fresh binding `ref mut binding @ OPT_SUBPATTERN`. /// The `DefId` is for the definition of the variable being bound. - Binding(BindingMode, DefId, Spanned, Option>), + Binding(BindingAnnotation, DefId, Spanned, Option>), /// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`. /// The `bool` is `true` in the presence of a `..`. 
diff --git a/src/librustc/hir/pat_util.rs b/src/librustc/hir/pat_util.rs index 0190e74df695..144cb34ee356 100644 --- a/src/librustc/hir/pat_util.rs +++ b/src/librustc/hir/pat_util.rs @@ -87,7 +87,7 @@ impl hir::Pat { /// Call `f` on every "binding" in a pattern, e.g., on `a` in /// `match foo() { Some(a) => (), None => () }` pub fn each_binding(&self, mut f: F) - where F: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned), + where F: FnMut(hir::BindingAnnotation, ast::NodeId, Span, &Spanned), { self.walk(|p| { if let PatKind::Binding(binding_mode, _, ref pth, _) = p.node { @@ -130,12 +130,10 @@ impl hir::Pat { pub fn simple_name(&self) -> Option { match self.node { - PatKind::Binding(hir::BindByValue(..), _, ref path1, None) => { - Some(path1.node) - } - _ => { - None - } + PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ref path1, None) | + PatKind::Binding(hir::BindingAnnotation::Mutable, _, ref path1, None) => + Some(path1.node), + _ => None, } } @@ -163,16 +161,22 @@ impl hir::Pat { } /// Checks if the pattern contains any `ref` or `ref mut` bindings, - /// and if yes whether its containing mutable ones or just immutables ones. - pub fn contains_ref_binding(&self) -> Option { + /// and if yes whether it contains mutable or just immutables ones. + /// + /// FIXME(tschottdorf): this is problematic as the HIR is being scraped, + /// but ref bindings may be implicit after #42640. 
+ pub fn contains_explicit_ref_binding(&self) -> Option { let mut result = None; - self.each_binding(|mode, _, _, _| { - if let hir::BindingMode::BindByRef(m) = mode { - // Pick Mutable as maximum - match result { - None | Some(hir::MutImmutable) => result = Some(m), - _ => (), + self.each_binding(|annotation, _, _, _| { + match annotation { + hir::BindingAnnotation::Ref => { + match result { + None | Some(hir::MutImmutable) => result = Some(hir::MutImmutable), + _ => (), + } } + hir::BindingAnnotation::RefMut => result = Some(hir::MutMutable), + _ => (), } }); result @@ -182,9 +186,11 @@ impl hir::Pat { impl hir::Arm { /// Checks if the patterns for this arm contain any `ref` or `ref mut` /// bindings, and if yes whether its containing mutable ones or just immutables ones. - pub fn contains_ref_binding(&self) -> Option { + pub fn contains_explicit_ref_binding(&self) -> Option { + // FIXME(tschottdorf): contains_explicit_ref_binding() must be removed + // for #42640. self.pats.iter() - .filter_map(|pat| pat.contains_ref_binding()) + .filter_map(|pat| pat.contains_explicit_ref_binding()) .max_by_key(|m| match *m { hir::MutMutable => 1, hir::MutImmutable => 0, diff --git a/src/librustc/hir/print.rs b/src/librustc/hir/print.rs index beaf65b77d81..abfb00a24a11 100644 --- a/src/librustc/hir/print.rs +++ b/src/librustc/hir/print.rs @@ -1651,12 +1651,16 @@ impl<'a> State<'a> { PatKind::Wild => self.s.word("_")?, PatKind::Binding(binding_mode, _, ref path1, ref sub) => { match binding_mode { - hir::BindByRef(mutbl) => { + hir::BindingAnnotation::Ref => { self.word_nbsp("ref")?; - self.print_mutability(mutbl)?; + self.print_mutability(hir::MutImmutable)?; } - hir::BindByValue(hir::MutImmutable) => {} - hir::BindByValue(hir::MutMutable) => { + hir::BindingAnnotation::RefMut => { + self.word_nbsp("ref")?; + self.print_mutability(hir::MutMutable)?; + } + hir::BindingAnnotation::Unannotated => {} + hir::BindingAnnotation::Mutable => { self.word_nbsp("mut")?; } } diff --git 
a/src/librustc/ich/impls_hir.rs b/src/librustc/ich/impls_hir.rs index 7805029a67ff..b344084f580b 100644 --- a/src/librustc/ich/impls_hir.rs +++ b/src/librustc/ich/impls_hir.rs @@ -442,9 +442,11 @@ impl_stable_hash_for!(struct hir::FieldPat { is_shorthand }); -impl_stable_hash_for!(enum hir::BindingMode { - BindByRef(mutability), - BindByValue(mutability) +impl_stable_hash_for!(enum hir::BindingAnnotation { + Unannotated, + Mutable, + Ref, + RefMut }); impl_stable_hash_for!(enum hir::RangeEnd { diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index 3e227872848e..e03cbb45414d 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -617,6 +617,7 @@ for ty::TypeckTables<'tcx> { ref node_types, ref node_substs, ref adjustments, + ref pat_binding_modes, ref upvar_capture_map, ref closure_tys, ref closure_kinds, @@ -637,6 +638,7 @@ for ty::TypeckTables<'tcx> { ich::hash_stable_nodemap(hcx, hasher, node_types); ich::hash_stable_nodemap(hcx, hasher, node_substs); ich::hash_stable_nodemap(hcx, hasher, adjustments); + ich::hash_stable_nodemap(hcx, hasher, pat_binding_modes); ich::hash_stable_hashmap(hcx, hasher, upvar_capture_map, |hcx, up_var_id| { let ty::UpvarId { var_id, diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs index 259bd4f09991..87e933e85e22 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -796,16 +796,19 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { debug!("determine_pat_move_mode cmt_discr={:?} pat={:?}", cmt_discr, pat); return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |cmt_pat, pat| { - match pat.node { - PatKind::Binding(hir::BindByRef(..), ..) => - mode.lub(BorrowingMatch), - PatKind::Binding(hir::BindByValue(..), ..) => { - match copy_or_move(&self.mc, self.param_env, &cmt_pat, PatBindingMove) { - Copy => mode.lub(CopyingMatch), - Move(..) => mode.lub(MovingMatch), + if let PatKind::Binding(..) 
= pat.node { + let bm = *self.mc.tables.pat_binding_modes.get(&pat.id) + .expect("missing binding mode"); + match bm { + ty::BindByReference(..) => + mode.lub(BorrowingMatch), + ty::BindByValue(..) => { + match copy_or_move(&self.mc, self.param_env, &cmt_pat, PatBindingMove) { + Copy => mode.lub(CopyingMatch), + Move(..) => mode.lub(MovingMatch), + } } } - _ => {} } })); } @@ -818,8 +821,9 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { let ExprUseVisitor { ref mc, ref mut delegate, param_env } = *self; return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |cmt_pat, pat| { - if let PatKind::Binding(bmode, def_id, ..) = pat.node { + if let PatKind::Binding(_, def_id, ..) = pat.node { debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", cmt_pat, pat, match_mode); + let bm = *mc.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode"); // pat_ty: the type of the binding being produced. let pat_ty = return_if_err!(mc.node_ty(pat.id)); @@ -832,14 +836,14 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { } // It is also a borrow or copy/move of the value being matched. - match bmode { - hir::BindByRef(m) => { + match bm { + ty::BindByReference(m) => { if let ty::TyRef(r, _) = pat_ty.sty { let bk = ty::BorrowKind::from_mutbl(m); delegate.borrow(pat.id, pat.span, cmt_pat, r, bk, RefBinding); } } - hir::BindByValue(..) => { + ty::BindByValue(..) 
=> { let mode = copy_or_move(mc, param_env, &cmt_pat, PatBindingMove); debug!("walk_pat binding consuming pat"); delegate.consume_pat(pat, cmt_pat, mode); diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index 557d4b24f303..b4993aafc4c9 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -330,11 +330,12 @@ impl MutabilityCategory { ret } - fn from_local(tcx: TyCtxt, id: ast::NodeId) -> MutabilityCategory { + fn from_local(tcx: TyCtxt, tables: &ty::TypeckTables, id: ast::NodeId) -> MutabilityCategory { let ret = match tcx.hir.get(id) { hir_map::NodeLocal(p) => match p.node { - PatKind::Binding(bind_mode, ..) => { - if bind_mode == hir::BindByValue(hir::MutMutable) { + PatKind::Binding(..) => { + let bm = *tables.pat_binding_modes.get(&p.id).expect("missing binding mode"); + if bm == ty::BindByValue(hir::MutMutable) { McDeclared } else { McImmutable @@ -475,16 +476,21 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // *being borrowed* is. But ideally we would put in a more // fundamental fix to this conflated use of the node id. let ret_ty = match pat.node { - PatKind::Binding(hir::BindByRef(_), ..) => { - // a bind-by-ref means that the base_ty will be the type of the ident itself, - // but what we want here is the type of the underlying value being borrowed. - // So peel off one-level, turning the &T into T. - match base_ty.builtin_deref(false, ty::NoPreference) { - Some(t) => t.ty, - None => { - debug!("By-ref binding of non-derefable type {:?}", base_ty); - return Err(()); + PatKind::Binding(..) => { + let bm = *self.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode"); + if let ty::BindByReference(_) = bm { + // a bind-by-ref means that the base_ty will be the type of the ident itself, + // but what we want here is the type of the underlying value being borrowed. + // So peel off one-level, turning the &T into T. 
+ match base_ty.builtin_deref(false, ty::NoPreference) { + Some(t) => t.ty, + None => { + debug!("By-ref binding of non-derefable type {:?}", base_ty); + return Err(()); + } } + } else { + base_ty } } _ => base_ty, @@ -659,7 +665,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { id, span, cat: Categorization::Local(vid), - mutbl: MutabilityCategory::from_local(self.tcx, vid), + mutbl: MutabilityCategory::from_local(self.tcx, self.tables, vid), ty: expr_ty, note: NoteNone })) @@ -711,7 +717,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { let var_ty = self.node_ty(var_id)?; // Mutability of original variable itself - let var_mutbl = MutabilityCategory::from_local(self.tcx, var_id); + let var_mutbl = MutabilityCategory::from_local(self.tcx, self.tables, var_id); // Construct the upvar. This represents access to the field // from the environment (perhaps we should eventually desugar diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index 39cb5d1b8c8e..9133a5e777db 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -889,8 +889,32 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, /// | ( ..., P&, ... ) /// | box P& fn is_binding_pat(pat: &hir::Pat) -> bool { + // Note that the code below looks for *explicit* refs only, that is, it won't + // know about *implicit* refs as introduced in #42640. + // + // This is not a problem. For example, consider + // + // let (ref x, ref y) = (Foo { .. }, Bar { .. }); + // + // Due to the explicit refs on the left hand side, the below code would signal + // that the temporary value on the right hand side should live until the end of + // the enclosing block (as opposed to being dropped after the let is complete). + // + // To create an implicit ref, however, you must have a borrowed value on the RHS + // already, as in this example (which won't compile before #42640): + // + // let Foo { x, .. 
} = &Foo { x: ..., ... }; + // + // in place of + // + // let Foo { ref x, .. } = Foo { ... }; + // + // In the former case (the implicit ref version), the temporary is created by the + // & expression, and its lifetime would be extended to the end of the block (due + // to a different rule, not the below code). match pat.node { - PatKind::Binding(hir::BindByRef(_), ..) => true, + PatKind::Binding(hir::BindingAnnotation::Ref, ..) | + PatKind::Binding(hir::BindingAnnotation::RefMut, ..) => true, PatKind::Struct(_, ref field_pats, _) => { field_pats.iter().any(|fp| is_binding_pat(&fp.node.pat)) diff --git a/src/librustc/ty/binding.rs b/src/librustc/ty/binding.rs new file mode 100644 index 000000000000..3db61b76cc55 --- /dev/null +++ b/src/librustc/ty/binding.rs @@ -0,0 +1,35 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use hir::BindingAnnotation::*; +use hir::BindingAnnotation; +use hir::Mutability; + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] +pub enum BindingMode { + BindByReference(Mutability), + BindByValue(Mutability), +} + +impl BindingMode { + pub fn convert(ba: BindingAnnotation) -> BindingMode { + match ba { + Unannotated => BindingMode::BindByValue(Mutability::MutImmutable), + Mutable => BindingMode::BindByValue(Mutability::MutMutable), + Ref => BindingMode::BindByReference(Mutability::MutImmutable), + RefMut => BindingMode::BindByReference(Mutability::MutMutable), + } + } +} + +impl_stable_hash_for!(enum self::BindingMode { + BindByReference(mutability), + BindByValue(mutability) +}); diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 45ddd4c0ff17..be3cd99426d4 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -40,6 +40,7 @@ use ty::layout::{Layout, TargetDataLayout}; use ty::inhabitedness::DefIdForest; use ty::maps; use ty::steal::Steal; +use ty::BindingMode; use util::nodemap::{NodeMap, NodeSet, DefIdSet}; use util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::accumulate_vec::AccumulateVec; @@ -223,6 +224,9 @@ pub struct TypeckTables<'tcx> { pub adjustments: NodeMap>>, + // Stores the actual binding mode for all instances of hir::BindingAnnotation. 
+ pub pat_binding_modes: NodeMap, + /// Borrows pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>, @@ -274,6 +278,7 @@ impl<'tcx> TypeckTables<'tcx> { node_types: FxHashMap(), node_substs: NodeMap(), adjustments: NodeMap(), + pat_binding_modes: NodeMap(), upvar_capture_map: FxHashMap(), closure_tys: NodeMap(), closure_kinds: NodeMap(), diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 804f47b5283f..914419ede361 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -74,6 +74,9 @@ pub use self::sty::InferTy::*; pub use self::sty::RegionKind::*; pub use self::sty::TypeVariants::*; +pub use self::binding::BindingMode; +pub use self::binding::BindingMode::*; + pub use self::context::{TyCtxt, GlobalArenas, tls}; pub use self::context::{Lift, TypeckTables}; @@ -84,6 +87,7 @@ pub use self::trait_def::TraitDef; pub use self::maps::queries; pub mod adjustment; +pub mod binding; pub mod cast; pub mod error; pub mod fast_reject; diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index 1bfc5805bc8f..0124a77349ca 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -871,14 +871,15 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } - fn local_binding_mode(&self, node_id: ast::NodeId) -> hir::BindingMode { + fn local_binding_mode(&self, node_id: ast::NodeId) -> ty::BindingMode { let pat = match self.tcx.hir.get(node_id) { hir_map::Node::NodeLocal(pat) => pat, node => bug!("bad node for local: {:?}", node) }; match pat.node { - hir::PatKind::Binding(mode, ..) => mode, + hir::PatKind::Binding(..) => + *self.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode"), _ => bug!("local is not a binding: {:?}", pat) } } @@ -913,7 +914,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { Some(ImmutabilityBlame::ClosureEnv(_)) => {} Some(ImmutabilityBlame::ImmLocal(node_id)) => { let let_span = self.tcx.hir.span(node_id); - if let hir::BindingMode::BindByValue(..) 
= self.local_binding_mode(node_id) { + if let ty::BindByValue(..) = self.local_binding_mode(node_id) { if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(let_span) { let (_, is_implicit_self) = self.local_ty(node_id); if is_implicit_self && snippet != "self" { @@ -930,7 +931,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { Some(ImmutabilityBlame::LocalDeref(node_id)) => { let let_span = self.tcx.hir.span(node_id); match self.local_binding_mode(node_id) { - hir::BindingMode::BindByRef(..) => { + ty::BindByReference(..) => { let snippet = self.tcx.sess.codemap().span_to_snippet(let_span); if let Ok(snippet) = snippet { db.span_label( @@ -940,7 +941,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { ); } } - hir::BindingMode::BindByValue(..) => { + ty::BindByValue(..) => { if let (Some(local_ty), is_implicit_self) = self.local_ty(node_id) { if let Some(msg) = self.suggest_mut_for_immutable(local_ty, is_implicit_self) { diff --git a/src/librustc_const_eval/check_match.rs b/src/librustc_const_eval/check_match.rs index 95c8613232ec..060ff503d4e5 100644 --- a/src/librustc_const_eval/check_match.rs +++ b/src/librustc_const_eval/check_match.rs @@ -268,7 +268,12 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { fn check_for_bindings_named_the_same_as_variants(cx: &MatchVisitor, pat: &Pat) { pat.walk(|p| { - if let PatKind::Binding(hir::BindByValue(hir::MutImmutable), _, name, None) = p.node { + if let PatKind::Binding(_, _, name, None) = p.node { + let bm = *cx.tables.pat_binding_modes.get(&p.id).expect("missing binding mode"); + if bm != ty::BindByValue(hir::MutImmutable) { + // Nothing to check. + return true; + } let pat_ty = cx.tables.pat_ty(p); if let ty::TyAdt(edef, _) = pat_ty.sty { if edef.is_enum() && edef.variants.iter().any(|variant| { @@ -452,8 +457,9 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor, pats: &[P]) { let mut by_ref_span = None; for pat in pats { - pat.each_binding(|bm, _, span, _path| { - if let hir::BindByRef(..) 
= bm { + pat.each_binding(|_, id, span, _path| { + let bm = *cx.tables.pat_binding_modes.get(&id).expect("missing binding mode"); + if let ty::BindByReference(..) = bm { by_ref_span = Some(span); } }) @@ -484,10 +490,16 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor, for pat in pats { pat.walk(|p| { - if let PatKind::Binding(hir::BindByValue(..), _, _, ref sub) = p.node { - let pat_ty = cx.tables.node_id_to_type(p.id); - if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) { - check_move(p, sub.as_ref().map(|p| &**p)); + if let PatKind::Binding(_, _, _, ref sub) = p.node { + let bm = *cx.tables.pat_binding_modes.get(&p.id).expect("missing binding mode"); + match bm { + ty::BindByValue(..) => { + let pat_ty = cx.tables.node_id_to_type(p.id); + if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) { + check_move(p, sub.as_ref().map(|p| &**p)); + } + } + _ => {} } } true diff --git a/src/librustc_const_eval/pattern.rs b/src/librustc_const_eval/pattern.rs index ab919da81520..f37a112a596a 100644 --- a/src/librustc_const_eval/pattern.rs +++ b/src/librustc_const_eval/pattern.rs @@ -374,27 +374,31 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } } - PatKind::Binding(bm, def_id, ref ident, ref sub) => { + PatKind::Binding(_, def_id, ref ident, ref sub) => { let id = self.tcx.hir.as_local_node_id(def_id).unwrap(); let var_ty = self.tables.node_id_to_type(pat.id); let region = match var_ty.sty { ty::TyRef(r, _) => Some(r), _ => None, }; + let bm = *self.tables.pat_binding_modes.get(&pat.id) + .expect("missing binding mode"); let (mutability, mode) = match bm { - hir::BindByValue(hir::MutMutable) => + ty::BindByValue(hir::MutMutable) => (Mutability::Mut, BindingMode::ByValue), - hir::BindByValue(hir::MutImmutable) => + ty::BindByValue(hir::MutImmutable) => (Mutability::Not, BindingMode::ByValue), - hir::BindByRef(hir::MutMutable) => - (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Mut)), - hir::BindByRef(hir::MutImmutable) => - 
(Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Shared)), + ty::BindByReference(hir::MutMutable) => + (Mutability::Not, BindingMode::ByRef( + region.unwrap(), BorrowKind::Mut)), + ty::BindByReference(hir::MutImmutable) => + (Mutability::Not, BindingMode::ByRef( + region.unwrap(), BorrowKind::Shared)), }; // A ref x pattern is the same node used for x, and as such it has // x's type, which is &T, where we want T (the type being matched). - if let hir::BindByRef(_) = bm { + if let ty::BindByReference(_) = bm { if let ty::TyRef(_, mt) = ty.sty { ty = mt.ty; } else { diff --git a/src/librustc_lint/unused.rs b/src/librustc_lint/unused.rs index 473c0f3ffda8..d7d0dc7cb352 100644 --- a/src/librustc_lint/unused.rs +++ b/src/librustc_lint/unused.rs @@ -44,9 +44,13 @@ impl UnusedMut { let mut mutables = FxHashMap(); for p in pats { - p.each_binding(|mode, id, _, path1| { + p.each_binding(|_, id, span, path1| { + let bm = match cx.tables.pat_binding_modes.get(&id) { + Some(&bm) => bm, + None => span_bug!(span, "missing binding mode"), + }; let name = path1.node; - if let hir::BindByValue(hir::MutMutable) = mode { + if let ty::BindByValue(hir::MutMutable) = bm { if !name.as_str().starts_with("_") { match mutables.entry(name) { Vacant(entry) => { diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index 88013b45a05a..349a21af8958 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -2277,8 +2277,9 @@ impl<'a> Resolver<'a> { false, pat.span) .and_then(LexicalScopeBinding::item); let resolution = binding.map(NameBinding::def).and_then(|def| { + let ivmode = BindingMode::ByValue(Mutability::Immutable); let always_binding = !pat_src.is_refutable() || opt_pat.is_some() || - bmode != BindingMode::ByValue(Mutability::Immutable); + bmode != ivmode; match def { Def::StructCtor(_, CtorKind::Const) | Def::VariantCtor(_, CtorKind::Const) | diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs 
index 68726a7b1c4e..01d2986a53ca 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -113,10 +113,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.demand_eqtype(pat.span, expected, rhs_ty); common_type } - PatKind::Binding(bm, def_id, _, ref sub) => { + PatKind::Binding(ba, def_id, _, ref sub) => { + // Note the binding mode in the typeck tables. For now, what we store is always + // identical to what could be scraped from the HIR, but this will change with + // default binding modes (#42640). + let bm = ty::BindingMode::convert(ba); + self.inh.tables.borrow_mut().pat_binding_modes.insert(pat.id, bm); + let typ = self.local_ty(pat.span, pat.id); match bm { - hir::BindByRef(mutbl) => { + ty::BindByReference(mutbl) => { // if the binding is like // ref x | ref const x | ref mut x // then `x` is assigned a value of type `&M T` where M is the mutability @@ -131,7 +137,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.demand_eqtype(pat.span, region_ty, typ); } // otherwise the type of x is the expected type T - hir::BindByValue(_) => { + ty::BindByValue(_) => { // As above, `T <: typeof(x)` is required but we // use equality, see (*) below. self.demand_eqtype(pat.span, expected, typ); @@ -396,11 +402,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match_src: hir::MatchSource) -> Ty<'tcx> { let tcx = self.tcx; - // Not entirely obvious: if matches may create ref bindings, we - // want to use the *precise* type of the discriminant, *not* some - // supertype, as the "discriminant type" (issue #23116). + // Not entirely obvious: if matches may create ref bindings, we want to + // use the *precise* type of the discriminant, *not* some supertype, as + // the "discriminant type" (issue #23116). + // + // FIXME(tschottdorf): don't call contains_explicit_ref_binding, which + // is problematic as the HIR is being scraped, but ref bindings may be + // implicit after #42640. 
We need to make sure that pat_adjustments + // (once introduced) is populated by the time we get here. let contains_ref_bindings = arms.iter() - .filter_map(|a| a.contains_ref_binding()) + .filter_map(|a| a.contains_explicit_ref_binding()) .max_by_key(|m| match *m { hir::MutMutable => 1, hir::MutImmutable => 0, diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 7f69885047b9..37fd0dd15861 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -3999,7 +3999,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { local: &'gcx hir::Local, init: &'gcx hir::Expr) -> Ty<'tcx> { - let ref_bindings = local.pat.contains_ref_binding(); + // FIXME(tschottdorf): contains_explicit_ref_binding() must be removed + // for #42640. + let ref_bindings = local.pat.contains_explicit_ref_binding(); let local_ty = self.local_ty(init.span, local.id); if let Some(m) = ref_bindings { diff --git a/src/librustc_typeck/check/regionck.rs b/src/librustc_typeck/check/regionck.rs index 82207428efc4..9b7ecc194ca8 100644 --- a/src/librustc_typeck/check/regionck.rs +++ b/src/librustc_typeck/check/regionck.rs @@ -1196,9 +1196,13 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { mc.cat_pattern(discr_cmt, root_pat, |sub_cmt, sub_pat| { match sub_pat.node { // `ref x` pattern - PatKind::Binding(hir::BindByRef(mutbl), ..) => { - self.link_region_from_node_type(sub_pat.span, sub_pat.id, - mutbl, sub_cmt); + PatKind::Binding(..) 
=> { + let bm = *mc.tables.pat_binding_modes.get(&sub_pat.id) + .expect("missing binding mode"); + if let ty::BindByReference(mutbl) = bm { + self.link_region_from_node_type(sub_pat.span, sub_pat.id, + mutbl, sub_cmt); + } } _ => {} } diff --git a/src/librustc_typeck/check/writeback.rs b/src/librustc_typeck/check/writeback.rs index 81e5dae5477e..0a323efabec1 100644 --- a/src/librustc_typeck/check/writeback.rs +++ b/src/librustc_typeck/check/writeback.rs @@ -178,6 +178,15 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { } fn visit_pat(&mut self, p: &'gcx hir::Pat) { + match p.node { + hir::PatKind::Binding(..) => { + let bm = *self.fcx.tables.borrow().pat_binding_modes.get(&p.id) + .expect("missing binding mode"); + self.tables.pat_binding_modes.insert(p.id, bm); + } + _ => {} + }; + self.visit_node_id(p.span, p.id); intravisit::walk_pat(self, p); } diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 7b105a8fa14c..893bada2670d 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -867,13 +867,14 @@ mod tests { pat: P(ast::Pat { id: ast::DUMMY_NODE_ID, node: PatKind::Ident( - ast::BindingMode::ByValue(ast::Mutability::Immutable), - Spanned{ - span: sp(6,7), - node: Ident::from_str("b")}, - None - ), - span: sp(6,7) + ast::BindingMode::ByValue( + ast::Mutability::Immutable), + Spanned{ + span: sp(6,7), + node: Ident::from_str("b")}, + None + ), + span: sp(6,7) }), id: ast::DUMMY_NODE_ID }], From 6641415e8744e3f82f464eba270296536b1edaa1 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 30 Jul 2017 10:29:15 -0700 Subject: [PATCH 070/213] do not use doc comments inside functions --- src/librustc_mir/transform/add_validation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 70ef08cf2d18..ee472c616f65 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ 
b/src/librustc_mir/transform/add_validation.rs @@ -91,7 +91,7 @@ impl MirPass for AddValidation { let local_decls = mir.local_decls.clone(); // FIXME: Find a way to get rid of this clone. - /// Convert an lvalue to a validation operand. + // Convert an lvalue to a validation operand. let lval_to_operand = |lval: Lvalue<'tcx>| -> ValidationOperand<'tcx, Lvalue<'tcx>> { let (re, mutbl) = lval_context(&lval, &local_decls, tcx); let ty = lval.ty(&local_decls, tcx).to_ty(tcx); From 2574f31b9b71cd2b37002dbfc9a818c3cc805498 Mon Sep 17 00:00:00 2001 From: Niko Matsakis Date: Sat, 29 Jul 2017 21:40:37 +0300 Subject: [PATCH 071/213] save the subobligations as well --- src/librustc/traits/project.rs | 18 +++++--- src/test/run-pass/issue-43132.rs | 74 ++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 6 deletions(-) create mode 100644 src/test/run-pass/issue-43132.rs diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs index cae1eba5797c..7cce9c398bb4 100644 --- a/src/librustc/traits/project.rs +++ b/src/librustc/traits/project.rs @@ -462,13 +462,19 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( selcx.infcx().report_overflow_error(&obligation, false); } Err(ProjectionCacheEntry::NormalizedTy(ty)) => { - // If we find the value in the cache, then the obligations - // have already been returned from the previous entry (and - // should therefore have been honored). + // If we find the value in the cache, then return it along + // with the obligations that went along with it. Note + // that, when using a fulfillment context, these + // obligations could in principle be ignored: they have + // already been registered when the cache entry was + // created (and hence the new ones will quickly be + // discarded as duplicated). But when doing trait + // evaluation this is not the case, and dropping the trait + // evaluations can causes ICEs (e.g. #43132). 
debug!("opt_normalize_projection_type: \ found normalized ty `{:?}`", ty); - return Some(NormalizedTy { value: ty, obligations: vec![] }); + return Some(ty); } Err(ProjectionCacheEntry::Error) => { debug!("opt_normalize_projection_type: \ @@ -1326,7 +1332,7 @@ enum ProjectionCacheEntry<'tcx> { InProgress, Ambiguous, Error, - NormalizedTy(Ty<'tcx>), + NormalizedTy(NormalizedTy<'tcx>), } // NB: intentionally not Clone @@ -1374,7 +1380,7 @@ impl<'tcx> ProjectionCache<'tcx> { fn complete(&mut self, key: ty::ProjectionTy<'tcx>, value: &NormalizedTy<'tcx>) { debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}", key, value); - let fresh_key = self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.value)); + let fresh_key = self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.clone())); assert!(!fresh_key, "never started projecting `{:?}`", key); } diff --git a/src/test/run-pass/issue-43132.rs b/src/test/run-pass/issue-43132.rs new file mode 100644 index 000000000000..64b3b092b893 --- /dev/null +++ b/src/test/run-pass/issue-43132.rs @@ -0,0 +1,74 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(unused)] + +fn main() { +} + +fn foo() { + let b = mk::< + Forward<(Box>,)>, + >(); + b.map_err(|_| ()).join(); +} + +fn mk() -> T { + loop {} +} + +impl, E> Future for (I,) { + type Error = E; +} + +struct Forward { + _a: T, +} + +impl Future for Forward +where + T::Error: From, +{ + type Error = T::Error; +} + +trait Future { + type Error; + + fn map_err(self, _: F) -> (Self, F) + where + F: FnOnce(Self::Error) -> E, + Self: Sized, + { + loop {} + } + + fn join(self) -> (MaybeDone, ()) + where + Self: Sized, + { + loop {} + } +} + +impl Future for Box { + type Error = S::Error; +} + +enum MaybeDone { + _Done(A::Error), +} + +impl Future for (A, F) +where + F: FnOnce(A::Error) -> U, +{ + type Error = U; +} From 6f14ff105f58672476e79bf3eff88d0673bbf0b2 Mon Sep 17 00:00:00 2001 From: "Zack M. Davis" Date: Thu, 27 Jul 2017 14:22:49 -0700 Subject: [PATCH 072/213] add extended info for E0436 functional record update syntax needs struct This example focuses on struct-like enum variants, because it's not immediately obvious in what other context we can get E0436 alone, without any other, more serious, errors. (Triggering E0436 with a union also emits a separate "union expressions should have exactly one field" error.) (One might argue that we ought to accept the functional record update syntax for struct-like enums, but that is beyond the scope of this error-index-comprehensiveness commit.) --- src/librustc_typeck/diagnostics.rs | 51 +++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index 60f32408abba..a1ac89051c84 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -3457,6 +3457,56 @@ impl Foo for i32 { ``` "##, +E0436: r##" +The functional record update syntax is only allowed for structs. (Struct-like +enum variants don't qualify, for example.) 
+ +Erroneous code example: + +```compile_fail,E0436 +enum PublicationFrequency { + Weekly, + SemiMonthly { days: (u8, u8), annual_special: bool }, +} + +fn one_up_competitor(competitor_frequency: PublicationFrequency) + -> PublicationFrequency { + match competitor_frequency { + PublicationFrequency::Weekly => PublicationFrequency::SemiMonthly { + days: (1, 15), annual_special: false + }, + c @ PublicationFrequency::SemiMonthly{ .. } => + PublicationFrequency::SemiMonthly { + annual_special: true, ..c // error: functional record update + // syntax requires a struct + } + } +} +``` + +Rewrite the expression without functional record update syntax: + +``` +enum PublicationFrequency { + Weekly, + SemiMonthly { days: (u8, u8), annual_special: bool }, +} + +fn one_up_competitor(competitor_frequency: PublicationFrequency) + -> PublicationFrequency { + match competitor_frequency { + PublicationFrequency::Weekly => PublicationFrequency::SemiMonthly { + days: (1, 15), annual_special: false + }, + PublicationFrequency::SemiMonthly{ days, .. } => + PublicationFrequency::SemiMonthly { + days, annual_special: true // ok! + } + } +} +``` +"##, + E0439: r##" The length of the platform-intrinsic function `simd_shuffle` wasn't specified. Erroneous code example: @@ -4655,7 +4705,6 @@ register_diagnostics! { // E0372, // coherence not object safe E0377, // the trait `CoerceUnsized` may only be implemented for a coercion // between structures with the same definition - E0436, // functional record update requires a struct E0521, // redundant default implementations of trait E0533, // `{}` does not name a unit variant, unit struct or a constant E0563, // cannot determine a type for this `impl Trait`: {} From 5605d58fc79e1cdacfbf01266a2d46b76df53ede Mon Sep 17 00:00:00 2001 From: "Zack M. 
Davis" Date: Thu, 27 Jul 2017 15:08:29 -0700 Subject: [PATCH 073/213] move extended info for E0569 to numerical-order location in file We want the error explanations to appear in numerical order so that they're easy to find. (Also, any other order would be arbitrary and thus not constitute a Schelling point.) Bizarrely, the extended information for E0569 was placed between E0244 and E0318 in librustc_typeck/diagnostics.rs (when the code was introduced in 9a649c32). This commit moves it to be between E0562 and E0570, where it belongs. (Also, at reviewer request, say "Erroneous code example", the standard verbiage that it has been decided that we say everywhere.) --- src/librustc_typeck/diagnostics.rs | 42 ++++++++++++++++-------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index a1ac89051c84..3037e8d4a160 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -2631,26 +2631,6 @@ struct Bar { x: Foo } ``` "##, -E0569: r##" -If an impl has a generic parameter with the `#[may_dangle]` attribute, then -that impl must be declared as an `unsafe impl. For example: - -```compile_fail,E0569 -#![feature(generic_param_attrs)] -#![feature(dropck_eyepatch)] - -struct Foo(X); -impl<#[may_dangle] X> Drop for Foo { - fn drop(&mut self) { } -} -``` - -In this example, we are asserting that the destructor for `Foo` will not -access any data of type `X`, and require this assertion to be true for -overall safety in our program. The compiler does not currently attempt to -verify this assertion; therefore we must tag this `impl` as unsafe. -"##, - E0318: r##" Default impls for a trait must be located in the same crate where the trait was defined. For more information see the [opt-in builtin traits RFC][RFC 19]. @@ -3976,6 +3956,28 @@ See [RFC 1522] for more details. 
[RFC 1522]: https://github.com/rust-lang/rfcs/blob/master/text/1522-conservative-impl-trait.md "##, +E0569: r##" +If an impl has a generic parameter with the `#[may_dangle]` attribute, then +that impl must be declared as an `unsafe impl. + +Erroneous code example: + +```compile_fail,E0569 +#![feature(generic_param_attrs)] +#![feature(dropck_eyepatch)] + +struct Foo(X); +impl<#[may_dangle] X> Drop for Foo { + fn drop(&mut self) { } +} +``` + +In this example, we are asserting that the destructor for `Foo` will not +access any data of type `X`, and require this assertion to be true for +overall safety in our program. The compiler does not currently attempt to +verify this assertion; therefore we must tag this `impl` as unsafe. +"##, + E0570: r##" The requested ABI is unsupported by the current target. From 7dab9812c4f76aac6e442ff053c34c076b76643d Mon Sep 17 00:00:00 2001 From: "Zack M. Davis" Date: Thu, 27 Jul 2017 21:42:03 -0700 Subject: [PATCH 074/213] extended info for E0595 closure cannot mutate immutable local variable --- src/librustc_borrowck/diagnostics.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/librustc_borrowck/diagnostics.rs b/src/librustc_borrowck/diagnostics.rs index 38dcc7312369..fea9d0d6f132 100644 --- a/src/librustc_borrowck/diagnostics.rs +++ b/src/librustc_borrowck/diagnostics.rs @@ -1132,6 +1132,24 @@ fn main() { ``` "##, +E0595: r##" +Closures cannot mutate immutable captured variables. + +Erroneous code example: + +```compile_fail,E0595 +let x = 3; // error: closure cannot assign to immutable local variable `x` +let mut c = || { x += 1 }; +``` + +Make the variable binding mutable: + +``` +let mut x = 3; // ok! +let mut c = || { x += 1 }; +``` +"##, + E0596: r##" This error occurs because you tried to mutably borrow a non-mutable variable. @@ -1189,6 +1207,5 @@ register_diagnostics! 
{ // E0385, // {} in an aliasable location E0524, // two closures require unique access to `..` at the same time E0594, // cannot assign to {} - E0595, // closure cannot assign to {} E0598, // lifetime of {} is too short to guarantee its contents can be... } From a2d55146938972d7eecc19f9315f86d7ecb8f94b Mon Sep 17 00:00:00 2001 From: QuietMisdreavus Date: Sun, 30 Jul 2017 14:59:08 -0500 Subject: [PATCH 075/213] add docs for references as a primitive --- src/librustdoc/clean/mod.rs | 8 +++ src/librustdoc/html/format.rs | 36 +++++------ src/libstd/primitive_docs.rs | 117 ++++++++++++++++++++++++++++++++++ 3 files changed, 142 insertions(+), 19 deletions(-) diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 9d0b5b41a913..39258dd3a246 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -1547,6 +1547,7 @@ pub enum PrimitiveType { Array, Tuple, RawPointer, + Reference, } #[derive(Clone, RustcEncodable, RustcDecodable, Copy, Debug)] @@ -1581,6 +1582,7 @@ impl Type { Array(..) | BorrowedRef { type_: box Array(..), .. } => Some(PrimitiveType::Array), Tuple(..) => Some(PrimitiveType::Tuple), RawPointer(..) => Some(PrimitiveType::RawPointer), + BorrowedRef { type_: box Generic(..), .. } => Some(PrimitiveType::Reference), _ => None, } } @@ -1633,6 +1635,7 @@ impl PrimitiveType { "slice" => Some(PrimitiveType::Slice), "tuple" => Some(PrimitiveType::Tuple), "pointer" => Some(PrimitiveType::RawPointer), + "reference" => Some(PrimitiveType::Reference), _ => None, } } @@ -1661,6 +1664,7 @@ impl PrimitiveType { Slice => "slice", Tuple => "tuple", RawPointer => "pointer", + Reference => "reference", } } @@ -2556,6 +2560,7 @@ fn build_deref_target_impls(cx: &DocContext, Array => tcx.lang_items.slice_impl(), Tuple => None, RawPointer => tcx.lang_items.const_ptr_impl(), + Reference => None, }; if let Some(did) = did { if !did.is_local() { @@ -2777,6 +2782,9 @@ fn resolve_type(cx: &DocContext, Def::SelfTy(..) 
if path.segments.len() == 1 => { return Generic(keywords::SelfType.name().to_string()); } + Def::TyParam(..) if path.segments.len() == 1 => { + return Generic(format!("{:#}", path)); + } Def::SelfTy(..) | Def::TyParam(..) | Def::AssociatedTy(..) => true, _ => false, }; diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index 766e76137ca4..33ab5cf47de2 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -665,26 +665,29 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: _ => "".to_string(), }; let m = MutableSpace(mutability); + let amp = if f.alternate() { + "&".to_string() + } else { + "&".to_string() + }; match **ty { clean::Slice(ref bt) => { // BorrowedRef{ ... Slice(T) } is &[T] match **bt { clean::Generic(_) => { if f.alternate() { primitive_link(f, PrimitiveType::Slice, - &format!("&{}{}[{:#}]", lt, m, **bt)) + &format!("{}{}{}[{:#}]", amp, lt, m, **bt)) } else { primitive_link(f, PrimitiveType::Slice, - &format!("&{}{}[{}]", lt, m, **bt)) + &format!("{}{}{}[{}]", amp, lt, m, **bt)) } } _ => { + primitive_link(f, PrimitiveType::Slice, + &format!("{}{}{}[", amp, lt, m))?; if f.alternate() { - primitive_link(f, PrimitiveType::Slice, - &format!("&{}{}[", lt, m))?; write!(f, "{:#}", **bt)?; } else { - primitive_link(f, PrimitiveType::Slice, - &format!("&{}{}[", lt, m))?; write!(f, "{}", **bt)?; } primitive_link(f, PrimitiveType::Slice, "]") @@ -692,23 +695,18 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: } } clean::ResolvedPath { typarams: Some(ref v), .. } if !v.is_empty() => { - if f.alternate() { - write!(f, "&{}{}", lt, m)?; - } else { - write!(f, "&{}{}", lt, m)?; - } - write!(f, "(")?; + write!(f, "{}{}{}(", amp, lt, m)?; fmt_type(&ty, f, use_absolute)?; write!(f, ")") } + clean::Generic(..) 
=> { + primitive_link(f, PrimitiveType::Reference, + &format!("{}{}{}", amp, lt, m))?; + fmt_type(&ty, f, use_absolute) + } _ => { - if f.alternate() { - write!(f, "&{}{}", lt, m)?; - fmt_type(&ty, f, use_absolute) - } else { - write!(f, "&{}{}", lt, m)?; - fmt_type(&ty, f, use_absolute) - } + write!(f, "{}{}{}", amp, lt, m)?; + fmt_type(&ty, f, use_absolute) } } } diff --git a/src/libstd/primitive_docs.rs b/src/libstd/primitive_docs.rs index 869299e21448..84dba274a2e7 100644 --- a/src/libstd/primitive_docs.rs +++ b/src/libstd/primitive_docs.rs @@ -722,3 +722,120 @@ mod prim_isize { } /// #[stable(feature = "rust1", since = "1.0.0")] mod prim_usize { } + +#[doc(primitive = "reference")] +// +/// References, both shared and mutable. +/// +/// A reference represents a borrow of some owned value. You can get one by using the `&` or `&mut` +/// operators on a value, or by using a `ref` or `ref mut` pattern. +/// +/// For those familiar with pointers, a reference is just a pointer that is assumed to not be null. +/// In fact, `Option<&T>` has the same memory representation as a nullable pointer, and can be +/// passed across FFI boundaries as such. +/// +/// In most cases, references can be used much like the original value. Field access, method +/// calling, and indexing work the same (save for mutability rules, of course). In addition, the +/// comparison operators transparently defer to the referent's implementation, allowing references +/// to be compared the same as owned values. +/// +/// References have a lifetime attached to them, which represents the scope for which the borrow is +/// valid. A lifetime is said to "outlive" another one if its representative scope is as long or +/// longer than the other. The `'static` lifetime is the longest lifetime, which represents the +/// total life of the program. 
For example, string literals have a `'static` lifetime because the +/// text data is embedded into the binary of the program, rather than in an allocation that needs +/// to be dynamically managed. +/// +/// `&mut T` references can be freely coerced into `&T` references with the same referent type, and +/// references with longer lifetimes can be freely coerced into references with shorter ones. +/// +/// For more information on how to use references, see [the book's section on "References and +/// Borrowing"][book-refs]. +/// +/// [book-refs]: ../book/second-edition/ch04-02-references-and-borrowing.html +/// +/// The following traits are implemented for all `&T`, regardless of the type of its referent: +/// +/// * [`Copy`] +/// * [`Clone`] \(Note that this will not defer to `T`'s `Clone` implementation if it exists!) +/// * [`Deref`] +/// * [`Borrow`] +/// * [`Pointer`] +/// +/// [`Copy`]: marker/trait.Copy.html +/// [`Clone`]: clone/trait.Clone.html +/// [`Deref`]: ops/trait.Deref.html +/// [`Borrow`]: borrow/trait.Borrow.html +/// [`Pointer`]: fmt/trait.Pointer.html +/// +/// `&mut T` references get all of the above except `Copy` and `Clone` (to prevent creating +/// multiple simultaneous mutable borrows), plus the following, regardless of the type of its +/// referent: +/// +/// * [`DerefMut`] +/// * [`BorrowMut`] +/// +/// [`DerefMut`]: ops/trait.DerefMut.html +/// [`BorrowMut`]: borrow/trait.BorrowMut.html +/// +/// The following traits are implemented on `&T` references if the underlying `T` also implements +/// that trait: +/// +/// * All the traits in [`std::fmt`] except [`Pointer`] and [`fmt::Write`] +/// * [`PartialOrd`] +/// * [`Ord`] +/// * [`PartialEq`] +/// * [`Eq`] +/// * [`AsRef`] +/// * [`Fn`] \(in addition, `&T` references get [`FnMut`] and [`FnOnce`] if `T: Fn`) +/// * [`Hash`] +/// * [`ToSocketAddrs`] +/// +/// [`std::fmt`]: fmt/index.html +/// [`fmt::Write`]: fmt/trait.Write.html +/// [`PartialOrd`]: cmp/trait.PartialOrd.html +/// [`Ord`]: 
cmp/trait.Ord.html +/// [`PartialEq`]: cmp/trait.PartialEq.html +/// [`Eq`]: cmp/trait.Eq.html +/// [`AsRef`]: convert/trait.AsRef.html +/// [`Fn`]: ops/trait.Fn.html +/// [`FnMut`]: ops/trait.FnMut.html +/// [`FnOnce`]: ops/trait.FnOnce.html +/// [`Hash`]: hash/trait.Hash.html +/// [`ToSocketAddrs`]: net/trait.ToSocketAddrs.html +/// +/// `&mut T` references get all of the above except `ToSocketAddrs`, plus the following, if `T` +/// implements that trait: +/// +/// * [`AsMut`] +/// * [`FnMut`] \(in addition, `&mut T` references get [`FnOnce`] if `T: FnMut`) +/// * [`fmt::Write`] +/// * [`Iterator`] +/// * [`DoubleEndedIterator`] +/// * [`ExactSizeIterator`] +/// * [`FusedIterator`] +/// * [`TrustedLen`] +/// * [`Send`] \(note that `&T` references only get `Send` if `T: Sync`) +/// * [`io::Write`] +/// * [`Read`] +/// * [`Seek`] +/// * [`BufRead`] +/// +/// [`AsMut`]: convert/trait.AsMut.html +/// [`Iterator`]: iter/trait.Iterator.html +/// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html +/// [`ExactSizeIterator`]: iter/trait.ExactSizeIterator.html +/// [`FusedIterator`]: iter/trait.FusedIterator.html +/// [`TrustedLen`]: iter/trait.TrustedLen.html +/// [`Send`]: marker/trait.Send.html +/// [`io::Write`]: io/trait.Write.html +/// [`Read`]: io/trait.Read.html +/// [`Seek`]: io/trait.Seek.html +/// [`BufRead`]: io/trait.BufRead.html +/// +/// Note that due to method call deref coercion, simply calling a trait method will act like they +/// work on references as well as they do on owned values! The implementations described here are +/// meant for generic contexts, where the final type `T` is a type parameter or otherwise not +/// locally known. 
+#[stable(feature = "rust1", since = "1.0.0")] +mod prim_ref { } From eb1c44b6edf20a6a1c20a7d8b1c94ab15c2fe7f4 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Sun, 30 Jul 2017 13:48:49 -0700 Subject: [PATCH 076/213] rustbuild: Remove `--enable-llvm-clean-rebuild` This was intended for bots back in the day where we'd persist caches of LLVM builds across runs, but nowadays we don't do that on any of the bots so this option is no longer necessary --- configure | 1 - src/bootstrap/config.rs | 4 ---- src/bootstrap/config.toml.example | 5 ----- src/bootstrap/native.rs | 3 --- src/ci/run.sh | 1 - 5 files changed, 14 deletions(-) diff --git a/configure b/configure index 2b82b5e405b6..664b473b2c9d 100755 --- a/configure +++ b/configure @@ -437,7 +437,6 @@ opt local-rust 0 "use an installed rustc rather than downloading a snapshot" opt local-rebuild 0 "assume local-rust matches the current version, for rebuilds; implies local-rust, and is implied if local-rust already matches the current version" opt llvm-static-stdcpp 0 "statically link to libstdc++ for LLVM" opt llvm-link-shared 0 "prefer shared linking to LLVM (llvm-config --link-shared)" -opt llvm-clean-rebuild 0 "delete LLVM build directory on rebuild" opt rpath 1 "build rpaths into rustc itself" opt stage0-landing-pads 1 "enable landing pads during bootstrap with stage0" # This is used by the automation to produce single-target nightlies diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 7b8af436d5a7..5d898cb716dc 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -62,7 +62,6 @@ pub struct Config { pub llvm_targets: Option, pub llvm_experimental_targets: Option, pub llvm_link_jobs: Option, - pub llvm_clean_rebuild: bool, // rust codegen options pub rust_optimize: bool, @@ -203,7 +202,6 @@ struct Llvm { targets: Option, experimental_targets: Option, link_jobs: Option, - clean_rebuild: Option, } #[derive(Deserialize, Default, Clone)] @@ -352,7 +350,6 @@ impl Config { set(&mut 
config.llvm_release_debuginfo, llvm.release_debuginfo); set(&mut config.llvm_version_check, llvm.version_check); set(&mut config.llvm_static_stdcpp, llvm.static_libstdcpp); - set(&mut config.llvm_clean_rebuild, llvm.clean_rebuild); config.llvm_targets = llvm.targets.clone(); config.llvm_experimental_targets = llvm.experimental_targets.clone(); config.llvm_link_jobs = llvm.link_jobs; @@ -477,7 +474,6 @@ impl Config { ("LLVM_VERSION_CHECK", self.llvm_version_check), ("LLVM_STATIC_STDCPP", self.llvm_static_stdcpp), ("LLVM_LINK_SHARED", self.llvm_link_shared), - ("LLVM_CLEAN_REBUILD", self.llvm_clean_rebuild), ("OPTIMIZE", self.rust_optimize), ("DEBUG_ASSERTIONS", self.rust_debug_assertions), ("DEBUGINFO", self.rust_debuginfo), diff --git a/src/bootstrap/config.toml.example b/src/bootstrap/config.toml.example index 7a52222e46e9..bf4786ba552a 100644 --- a/src/bootstrap/config.toml.example +++ b/src/bootstrap/config.toml.example @@ -69,11 +69,6 @@ # controlled by rustbuild's -j parameter. #link-jobs = 0 -# Delete LLVM build directory on LLVM rebuild. -# This option defaults to `false` for local development, but CI may want to -# always perform clean full builds (possibly accelerated by (s)ccache). 
-#clean-rebuild = false - # ============================================================================= # General build configuration options # ============================================================================= diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index 1da277cf1812..cfd20b02aaf6 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -80,9 +80,6 @@ impl Step for Llvm { return } } - if build.config.llvm_clean_rebuild { - drop(fs::remove_dir_all(&out_dir)); - } let _folder = build.fold_output(|| "llvm"); println!("Building LLVM for {}", target); diff --git a/src/ci/run.sh b/src/ci/run.sh index ccf0bb1ffb70..39fb4e440781 100755 --- a/src/ci/run.sh +++ b/src/ci/run.sh @@ -31,7 +31,6 @@ RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-sccache" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-manage-submodules" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-locked-deps" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-cargo-openssl-static" -RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-clean-rebuild" if [ "$DIST_SRC" = "" ]; then RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-dist-src" From bbb5311ad692819f979e9c9b1cd506b63c901378 Mon Sep 17 00:00:00 2001 From: Tobias Bucher Date: Sun, 30 Jul 2017 23:20:54 +0200 Subject: [PATCH 077/213] Document the `from_str_radix` panic CC #42034 --- src/libcore/num/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index d533310625e5..c5175287ccfa 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -131,6 +131,10 @@ macro_rules! int_impl { /// /// Leading and trailing whitespace represent an error. /// + /// # Panics + /// + /// This function panics if `radix` is not in the range from 2 to 36. 
+ /// /// # Examples /// /// Basic usage: From 83eb264273fe7ace01b2100c116daa36f06920b8 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Sat, 29 Jul 2017 20:30:44 +0300 Subject: [PATCH 078/213] borrowck: skip CFG construction when there is nothing to propagate CFG construction takes a large amount of time and memory, especially for large constants. If such a constant contains no actions on lvalues, it can't have borrowck problems and can be ignored by it. This removes the 4.9GB borrowck peak from #36799. It seems that HIR had grown by 300MB and MIR had grown by 500MB from the last massif collection and that remains to be investigated, but this at least shaves the borrowck peak. --- src/librustc_borrowck/borrowck/mod.rs | 53 ++++++++++++++------- src/librustc_borrowck/borrowck/move_data.rs | 9 ++++ 2 files changed, 46 insertions(+), 16 deletions(-) diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index 1bfc5805bc8f..16ecfce49d0d 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -112,19 +112,28 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) { // is not yet stolen. tcx.mir_validated(owner_def_id).borrow(); - let cfg = cfg::CFG::new(bccx.tcx, &body); - let AnalysisData { all_loans, - loans: loan_dfcx, - move_data: flowed_moves } = - build_borrowck_dataflow_data(bccx, &cfg, body_id); - - check_loans::check_loans(bccx, &loan_dfcx, &flowed_moves, &all_loans, body); + // option dance because you can't capture an uninitialized variable + // by mut-ref. 
+ let mut cfg = None; + if let Some(AnalysisData { all_loans, + loans: loan_dfcx, + move_data: flowed_moves }) = + build_borrowck_dataflow_data(bccx, false, body_id, + |bccx| { + cfg = Some(cfg::CFG::new(bccx.tcx, &body)); + cfg.as_mut().unwrap() + }) + { + check_loans::check_loans(bccx, &loan_dfcx, &flowed_moves, &all_loans, body); + } } -fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, - cfg: &cfg::CFG, - body_id: hir::BodyId) - -> AnalysisData<'a, 'tcx> +fn build_borrowck_dataflow_data<'a, 'c, 'tcx, F>(this: &mut BorrowckCtxt<'a, 'tcx>, + force_analysis: bool, + body_id: hir::BodyId, + get_cfg: F) + -> Option> + where F: FnOnce(&mut BorrowckCtxt<'a, 'tcx>) -> &'c cfg::CFG { // Check the body of fn items. let tcx = this.tcx; @@ -137,6 +146,18 @@ fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, let (all_loans, move_data) = gather_loans::gather_loans_in_fn(this, body_id); + if !force_analysis && move_data.is_empty() && all_loans.is_empty() { + // large arrays of data inserted as constants can take a lot of + // time and memory to borrow-check - see issue #36799. However, + // they don't have lvalues, so no borrow-check is actually needed. + // Recognize that case and skip borrow-checking. 
+ debug!("skipping loan propagation for {:?} because of no loans", body_id); + return None; + } else { + debug!("propagating loans in {:?}", body_id); + } + + let cfg = get_cfg(this); let mut loan_dfcx = DataFlowContext::new(this.tcx, "borrowck", @@ -159,9 +180,9 @@ fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, id_range, body); - AnalysisData { all_loans: all_loans, - loans: loan_dfcx, - move_data:flowed_moves } + Some(AnalysisData { all_loans: all_loans, + loans: loan_dfcx, + move_data:flowed_moves }) } /// Accessor for introspective clients inspecting `AnalysisData` and @@ -178,8 +199,8 @@ pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>( let region_maps = tcx.region_maps(owner_def_id); let mut bccx = BorrowckCtxt { tcx, tables, region_maps, owner_def_id }; - let dataflow_data = build_borrowck_dataflow_data(&mut bccx, cfg, body_id); - (bccx, dataflow_data) + let dataflow_data = build_borrowck_dataflow_data(&mut bccx, true, body_id, |_| cfg); + (bccx, dataflow_data.unwrap()) } // ---------------------------------------------------------------------- diff --git a/src/librustc_borrowck/borrowck/move_data.rs b/src/librustc_borrowck/borrowck/move_data.rs index 0a31905c7928..fd80e8320d6a 100644 --- a/src/librustc_borrowck/borrowck/move_data.rs +++ b/src/librustc_borrowck/borrowck/move_data.rs @@ -220,6 +220,15 @@ impl<'a, 'tcx> MoveData<'tcx> { } } + /// return true if there are no trackable assignments or moves + /// in this move data - that means that there is nothing that + /// could cause a borrow error. 
+ pub fn is_empty(&self) -> bool { + self.moves.borrow().is_empty() && + self.path_assignments.borrow().is_empty() && + self.var_assignments.borrow().is_empty() + } + pub fn path_loan_path(&self, index: MovePathIndex) -> Rc> { (*self.paths.borrow())[index.get()].loan_path.clone() } From c4adeceb37e4bdf9be3b70ff2454b121531466ce Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Fri, 21 Jul 2017 15:14:21 +0200 Subject: [PATCH 079/213] async-llvm(1): Run LLVM already in trans_crate(). --- src/librustc_driver/driver.rs | 57 +++++---------------------- src/librustc_trans/back/write.rs | 12 ++---- src/librustc_trans/base.rs | 66 ++++++++++++++++++++++++-------- src/librustc_trans/lib.rs | 66 +++++++++++++++++++++++++++++++- 4 files changed, 129 insertions(+), 72 deletions(-) diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index c592882a1e43..1bc3f59ed047 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -15,8 +15,7 @@ use rustc_data_structures::stable_hasher::StableHasher; use rustc_mir as mir; use rustc::session::{Session, CompileResult}; use rustc::session::CompileIncomplete; -use rustc::session::config::{self, Input, OutputFilenames, OutputType, - OutputTypes}; +use rustc::session::config::{self, Input, OutputFilenames, OutputType}; use rustc::session::search_paths::PathKind; use rustc::lint; use rustc::middle::{self, dependency_format, stability, reachable}; @@ -26,7 +25,6 @@ use rustc::ty::{self, TyCtxt, Resolutions, GlobalArenas}; use rustc::traits; use rustc::util::common::{ErrorReported, time}; use rustc::util::nodemap::NodeSet; -use rustc::util::fs::rename_or_copy_remove; use rustc_allocator as allocator; use rustc_borrowck as borrowck; use rustc_incremental::{self, IncrementalHashesMap}; @@ -231,7 +229,7 @@ pub fn compile_input(sess: &Session, sess.code_stats.borrow().print_type_sizes(); } - let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs); + let (phase5_result, trans) = 
phase_5_run_llvm_passes(sess, trans, &outputs); controller_entry_point!(after_llvm, sess, @@ -1057,7 +1055,7 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, analysis: ty::CrateAnalysis, incremental_hashes_map: &IncrementalHashesMap, output_filenames: &OutputFilenames) - -> trans::CrateTranslation { + -> trans::OngoingCrateTranslation { let time_passes = tcx.sess.time_passes(); time(time_passes, @@ -1069,61 +1067,26 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "translation", move || trans::trans_crate(tcx, analysis, &incremental_hashes_map, output_filenames)); - time(time_passes, - "assert dep graph", - || rustc_incremental::assert_dep_graph(tcx)); - - time(time_passes, - "serialize dep graph", - || rustc_incremental::save_dep_graph(tcx, - &incremental_hashes_map, - &translation.metadata.hashes, - translation.link.crate_hash)); translation } /// Run LLVM itself, producing a bitcode file, assembly file or object file /// as a side effect. pub fn phase_5_run_llvm_passes(sess: &Session, - trans: &trans::CrateTranslation, - outputs: &OutputFilenames) -> CompileResult { - if sess.opts.cg.no_integrated_as || - (sess.target.target.options.no_integrated_as && - (outputs.outputs.contains_key(&OutputType::Object) || - outputs.outputs.contains_key(&OutputType::Exe))) - { - let output_types = OutputTypes::new(&[(OutputType::Assembly, None)]); - time(sess.time_passes(), - "LLVM passes", - || write::run_passes(sess, trans, &output_types, outputs)); + trans: trans::OngoingCrateTranslation, + outputs: &OutputFilenames) + -> (CompileResult, trans::CrateTranslation) { + let trans = trans.join(sess, outputs); - write::run_assembler(sess, outputs); - - // HACK the linker expects the object file to be named foo.0.o but - // `run_assembler` produces an object named just foo.o. 
Rename it if we - // are going to build an executable - if sess.opts.output_types.contains_key(&OutputType::Exe) { - let f = outputs.path(OutputType::Object); - rename_or_copy_remove(&f, - f.with_file_name(format!("{}.0.o", - f.file_stem().unwrap().to_string_lossy()))).unwrap(); - } - - // Remove assembly source, unless --save-temps was specified - if !sess.opts.cg.save_temps { - fs::remove_file(&outputs.temp_path(OutputType::Assembly, None)).unwrap(); - } - } else { - time(sess.time_passes(), - "LLVM passes", - || write::run_passes(sess, trans, &sess.opts.output_types, outputs)); + if sess.opts.debugging_opts.incremental_info { + write::dump_incremental_data(&trans); } time(sess.time_passes(), "serialize work products", move || rustc_incremental::save_work_products(sess)); - sess.compile_status() + (sess.compile_status(), trans) } /// Run the linker on any artifacts that resulted from the LLVM run. diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 26553c85023b..4af4ee664a25 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -18,7 +18,7 @@ use rustc::session::Session; use llvm; use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef}; use llvm::SMDiagnosticRef; -use {CrateTranslation, ModuleLlvm, ModuleSource, ModuleTranslation}; +use {CrateTranslation, OngoingCrateTranslation, ModuleLlvm, ModuleSource, ModuleTranslation}; use rustc::hir::def_id::CrateNum; use rustc::util::common::{time, time_depth, set_time_depth, path2cstr}; use rustc::util::fs::link_or_copy; @@ -255,7 +255,7 @@ impl ModuleConfig { } } - fn set_flags(&mut self, sess: &Session, trans: &CrateTranslation) { + fn set_flags(&mut self, sess: &Session, trans: &OngoingCrateTranslation) { self.no_verify = sess.no_verify(); self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; self.no_builtins = trans.no_builtins; @@ -614,7 +614,7 @@ pub fn cleanup_llvm(trans: &CrateTranslation) { } pub fn 
run_passes(sess: &Session, - trans: &CrateTranslation, + trans: &OngoingCrateTranslation, output_types: &OutputTypes, crate_output: &OutputFilenames) { // It's possible that we have `codegen_units > 1` but only one item in @@ -748,10 +748,6 @@ pub fn run_passes(sess: &Session, work_items.push(work); } - if sess.opts.debugging_opts.incremental_info { - dump_incremental_data(&trans); - } - let client = sess.jobserver_from_env.clone().unwrap_or_else(|| { // Pick a "reasonable maximum" if we don't otherwise have a jobserver in // our environment, capping out at 32 so we don't take everything down @@ -938,7 +934,7 @@ pub fn run_passes(sess: &Session, } } -fn dump_incremental_data(trans: &CrateTranslation) { +pub fn dump_incremental_data(trans: &CrateTranslation) { let mut reuse = 0; for mtrans in trans.modules.iter() { match mtrans.source { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 7b836399f9cb..1fd871d31b59 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -23,7 +23,7 @@ //! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int, //! int) and rec(x=int, y=int, z=int) will have the same TypeRef. 
-use super::CrateTranslation; +use super::OngoingCrateTranslation; use super::ModuleLlvm; use super::ModuleSource; use super::ModuleTranslation; @@ -43,9 +43,9 @@ use rustc::dep_graph::AssertDepGraphSafe; use rustc::middle::cstore::LinkMeta; use rustc::hir::map as hir_map; use rustc::util::common::time; -use rustc::session::config::{self, NoDebugInfo, OutputFilenames}; +use rustc::session::config::{self, NoDebugInfo, OutputFilenames, OutputType, OutputTypes}; use rustc::session::Session; -use rustc_incremental::IncrementalHashesMap; +use rustc_incremental::{self, IncrementalHashesMap}; use abi; use allocator; use mir::lvalue::LvalueRef; @@ -922,7 +922,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, analysis: ty::CrateAnalysis, incremental_hashes_map: &IncrementalHashesMap, output_filenames: &OutputFilenames) - -> CrateTranslation { + -> OngoingCrateTranslation { // Be careful with this krate: obviously it gives access to the // entire contents of the krate. So if you push any subtasks of // `TransCrate`, you need to be careful to register "reads" of the @@ -961,17 +961,18 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, !tcx.sess.opts.output_types.should_trans() { let empty_exported_symbols = ExportedSymbols::empty(); let linker_info = LinkerInfo::new(&shared_ccx, &empty_exported_symbols); - return CrateTranslation { + return OngoingCrateTranslation { crate_name: tcx.crate_name(LOCAL_CRATE), modules: vec![], metadata_module: metadata_module, allocator_module: None, link: link_meta, metadata: metadata, - exported_symbols: empty_exported_symbols, + exported_symbols: Arc::new(empty_exported_symbols), no_builtins: no_builtins, linker_info: linker_info, windows_subsystem: None, + no_integrated_as: false, }; } @@ -1210,19 +1211,52 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, subsystem.to_string() }); - CrateTranslation { + let outputs = output_filenames; + + let no_integrated_as = sess.opts.cg.no_integrated_as || + 
(sess.target.target.options.no_integrated_as && + (outputs.outputs.contains_key(&OutputType::Object) || + outputs.outputs.contains_key(&OutputType::Exe))); + + let crate_translation = OngoingCrateTranslation { crate_name: tcx.crate_name(LOCAL_CRATE), - modules: modules, - metadata_module: metadata_module, - allocator_module: allocator_module, link: link_meta, metadata: metadata, - exported_symbols: Arc::try_unwrap(exported_symbols) - .expect("There's still a reference to exported_symbols?"), - no_builtins: no_builtins, - linker_info: linker_info, - windows_subsystem: windows_subsystem, - } + exported_symbols, + no_builtins, + linker_info, + windows_subsystem, + no_integrated_as, + + modules, + metadata_module, + allocator_module, + }; + + time(sess.time_passes(), + "assert dep graph", + || rustc_incremental::assert_dep_graph(tcx)); + + time(sess.time_passes(), + "serialize dep graph", + || rustc_incremental::save_dep_graph(tcx, + incremental_hashes_map, + &crate_translation.metadata.hashes, + crate_translation.link.crate_hash)); + // --- + + if no_integrated_as { + let output_types = OutputTypes::new(&[(OutputType::Assembly, None)]); + time(sess.time_passes(), + "LLVM passes", + || ::back::write::run_passes(sess, &crate_translation, &output_types, outputs)) + } else { + time(sess.time_passes(), + "LLVM passes", + || ::back::write::run_passes(sess, &crate_translation, &sess.opts.output_types, outputs)) + }; + + crate_translation } #[inline(never)] // give this a place in the profiler diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 70337a91731d..c386d11fa84c 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -35,7 +35,12 @@ #![feature(conservative_impl_trait)] use rustc::dep_graph::WorkProduct; +use rustc::session::Session; +use rustc::session::config::{OutputType, OutputFilenames}; +use rustc::util::fs::rename_or_copy_remove; use syntax_pos::symbol::Symbol; +use std::fs; +use std::sync::Arc; extern crate flate2; extern 
crate crossbeam; @@ -167,10 +172,69 @@ pub struct CrateTranslation { pub allocator_module: Option, pub link: rustc::middle::cstore::LinkMeta, pub metadata: rustc::middle::cstore::EncodedMetadata, - pub exported_symbols: back::symbol_export::ExportedSymbols, + pub exported_symbols: Arc, pub no_builtins: bool, pub windows_subsystem: Option, pub linker_info: back::linker::LinkerInfo } +pub struct OngoingCrateTranslation { + pub crate_name: Symbol, + pub link: rustc::middle::cstore::LinkMeta, + pub metadata: rustc::middle::cstore::EncodedMetadata, + pub exported_symbols: Arc, + pub no_builtins: bool, + pub windows_subsystem: Option, + pub linker_info: back::linker::LinkerInfo, + pub no_integrated_as: bool, + + // These will be replaced by a Future. + pub modules: Vec, + pub metadata_module: ModuleTranslation, + pub allocator_module: Option, +} + +impl OngoingCrateTranslation { + pub fn join(self, + sess: &Session, + outputs: &OutputFilenames) + -> CrateTranslation { + + let trans = CrateTranslation { + crate_name: self.crate_name, + link: self.link, + metadata: self.metadata, + exported_symbols: self.exported_symbols, + no_builtins: self.no_builtins, + windows_subsystem: self.windows_subsystem, + linker_info: self.linker_info, + + modules: self.modules, + metadata_module: self.metadata_module, + allocator_module: self.allocator_module, + }; + + if self.no_integrated_as { + back::write::run_assembler(sess, outputs); + + // HACK the linker expects the object file to be named foo.0.o but + // `run_assembler` produces an object named just foo.o. 
Rename it if we + // are going to build an executable + if sess.opts.output_types.contains_key(&OutputType::Exe) { + let f = outputs.path(OutputType::Object); + rename_or_copy_remove(&f, + f.with_file_name(format!("{}.0.o", + f.file_stem().unwrap().to_string_lossy()))).unwrap(); + } + + // Remove assembly source, unless --save-temps was specified + if !sess.opts.cg.save_temps { + fs::remove_file(&outputs.temp_path(OutputType::Assembly, None)).unwrap(); + } + } + + trans + } +} + __build_diagnostic_array! { librustc_trans, DIAGNOSTICS } From 29d4725b31bee27f025c17320f0eb59c5fc7af3b Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Fri, 21 Jul 2017 17:15:18 +0200 Subject: [PATCH 080/213] async-llvm(2): Decouple diagnostics emission from LLVM worker coordination. --- src/librustc_trans/back/write.rs | 350 +++++++++++++++++++------------ src/librustc_trans/base.rs | 28 ++- src/librustc_trans/lib.rs | 14 +- 3 files changed, 238 insertions(+), 154 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 4af4ee664a25..28d255d42045 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -23,7 +23,7 @@ use rustc::hir::def_id::CrateNum; use rustc::util::common::{time, time_depth, set_time_depth, path2cstr}; use rustc::util::fs::link_or_copy; use errors::{self, Handler, Level, DiagnosticBuilder, FatalError}; -use errors::emitter::Emitter; +use errors::emitter::{Emitter}; use syntax::ext::hygiene::Mark; use syntax_pos::MultiSpan; use context::{is_pie_binary, get_reloc_model}; @@ -38,7 +38,7 @@ use std::io; use std::io::Write; use std::path::{Path, PathBuf}; use std::str; -use std::sync::mpsc::{channel, Sender}; +use std::sync::mpsc::{channel, Sender, Receiver}; use std::slice; use libc::{c_uint, c_void, c_char, size_t}; @@ -304,6 +304,9 @@ pub struct CodegenContext<'a> { pub incr_comp_session_dir: Option, // Channel back to the main control thread to send messages to pub tx: Sender, + + // Error 
messages... + pub shared_emitter: SharedEmitter, } struct HandlerFreeVars<'a> { @@ -313,7 +316,7 @@ struct HandlerFreeVars<'a> { unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext<'a>, msg: &'b str, cookie: c_uint) { - drop(cgcx.tx.send(Message::InlineAsmError(cookie as u32, msg.to_string()))); + cgcx.shared_emitter.inline_asm_error(cookie as u32, msg.to_string()); } unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef, @@ -613,8 +616,17 @@ pub fn cleanup_llvm(trans: &CrateTranslation) { } } +pub struct RunLLVMPassesResult { + pub modules: Vec, + pub metadata_module: ModuleTranslation, + pub allocator_module: Option, +} + pub fn run_passes(sess: &Session, trans: &OngoingCrateTranslation, + modules: Vec, + metadata_module: ModuleTranslation, + allocator_module: Option, output_types: &OutputTypes, crate_output: &OutputFilenames) { // It's possible that we have `codegen_units > 1` but only one item in @@ -631,7 +643,7 @@ pub fn run_passes(sess: &Session, } // Sanity check - assert!(trans.modules.len() == sess.opts.cg.codegen_units || + assert!(modules.len() == sess.opts.cg.codegen_units || sess.opts.debugging_opts.incremental.is_some() || !sess.opts.output_types.should_trans() || sess.opts.debugging_opts.no_trans); @@ -722,17 +734,17 @@ pub fn run_passes(sess: &Session, // Populate a buffer with a list of codegen threads. Items are processed in // LIFO order, just because it's a tiny bit simpler that way. (The order // doesn't actually matter.) 
- let mut work_items = Vec::with_capacity(1 + trans.modules.len()); + let mut work_items = Vec::with_capacity(1 + modules.len()); { let work = build_work_item(sess, - trans.metadata_module.clone(), + metadata_module.clone(), metadata_config.clone(), crate_output.clone()); work_items.push(work); } - if let Some(allocator) = trans.allocator_module.clone() { + if let Some(allocator) = allocator_module.clone() { let work = build_work_item(sess, allocator, allocator_config.clone(), @@ -740,7 +752,7 @@ pub fn run_passes(sess: &Session, work_items.push(work); } - for mtrans in trans.modules.iter() { + for mtrans in modules.iter() { let work = build_work_item(sess, mtrans.clone(), modules_config.clone(), @@ -760,7 +772,7 @@ pub fn run_passes(sess: &Session, }); // If in incr. comp. mode, preserve the `.o` files for potential re-use - for mtrans in trans.modules.iter() { + for mtrans in modules.iter() { let mut files = vec![]; if modules_config.emit_obj { @@ -781,80 +793,82 @@ pub fn run_passes(sess: &Session, llvm::LLVMRustDisposeTargetMachine(tm); } - // Produce final compile outputs. - let copy_gracefully = |from: &Path, to: &Path| { - if let Err(e) = fs::copy(from, to) { - sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); - } - }; - - let copy_if_one_unit = |output_type: OutputType, - keep_numbered: bool| { - if trans.modules.len() == 1 { - // 1) Only one codegen unit. In this case it's no difficulty - // to copy `foo.0.x` to `foo.x`. - let module_name = Some(&trans.modules[0].name[..]); - let path = crate_output.temp_path(output_type, module_name); - copy_gracefully(&path, - &crate_output.path(output_type)); - if !sess.opts.cg.save_temps && !keep_numbered { - // The user just wants `foo.x`, not `foo.#module-name#.x`. 
- remove(sess, &path); - } - } else { - let ext = crate_output.temp_path(output_type, None) - .extension() - .unwrap() - .to_str() - .unwrap() - .to_owned(); - - if crate_output.outputs.contains_key(&output_type) { - // 2) Multiple codegen units, with `--emit foo=some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring emit path because multiple .{} files \ - were produced", ext)); - } else if crate_output.single_output_file.is_some() { - // 3) Multiple codegen units, with `-o some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring -o because multiple .{} files \ - were produced", ext)); - } else { - // 4) Multiple codegen units, but no explicit name. We - // just leave the `foo.0.x` files in place. - // (We don't have to do any work in this case.) - } - } - }; - - // Flag to indicate whether the user explicitly requested bitcode. - // Otherwise, we produced it only as a temporary output, and will need - // to get rid of it. let mut user_wants_bitcode = false; let mut user_wants_objects = false; - for output_type in output_types.keys() { - match *output_type { - OutputType::Bitcode => { - user_wants_bitcode = true; - // Copy to .bc, but always keep the .0.bc. There is a later - // check to figure out if we should delete .0.bc files, or keep - // them for making an rlib. - copy_if_one_unit(OutputType::Bitcode, true); + { + // Produce final compile outputs. + let copy_gracefully = |from: &Path, to: &Path| { + if let Err(e) = fs::copy(from, to) { + sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); } - OutputType::LlvmAssembly => { - copy_if_one_unit(OutputType::LlvmAssembly, false); + }; + + let copy_if_one_unit = |output_type: OutputType, + keep_numbered: bool| { + if modules.len() == 1 { + // 1) Only one codegen unit. In this case it's no difficulty + // to copy `foo.0.x` to `foo.x`. 
+ let module_name = Some(&modules[0].name[..]); + let path = crate_output.temp_path(output_type, module_name); + copy_gracefully(&path, + &crate_output.path(output_type)); + if !sess.opts.cg.save_temps && !keep_numbered { + // The user just wants `foo.x`, not `foo.#module-name#.x`. + remove(sess, &path); + } + } else { + let ext = crate_output.temp_path(output_type, None) + .extension() + .unwrap() + .to_str() + .unwrap() + .to_owned(); + + if crate_output.outputs.contains_key(&output_type) { + // 2) Multiple codegen units, with `--emit foo=some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring emit path because multiple .{} files \ + were produced", ext)); + } else if crate_output.single_output_file.is_some() { + // 3) Multiple codegen units, with `-o some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring -o because multiple .{} files \ + were produced", ext)); + } else { + // 4) Multiple codegen units, but no explicit name. We + // just leave the `foo.0.x` files in place. + // (We don't have to do any work in this case.) + } } - OutputType::Assembly => { - copy_if_one_unit(OutputType::Assembly, false); + }; + + // Flag to indicate whether the user explicitly requested bitcode. + // Otherwise, we produced it only as a temporary output, and will need + // to get rid of it. + for output_type in output_types.keys() { + match *output_type { + OutputType::Bitcode => { + user_wants_bitcode = true; + // Copy to .bc, but always keep the .0.bc. There is a later + // check to figure out if we should delete .0.bc files, or keep + // them for making an rlib. 
+ copy_if_one_unit(OutputType::Bitcode, true); + } + OutputType::LlvmAssembly => { + copy_if_one_unit(OutputType::LlvmAssembly, false); + } + OutputType::Assembly => { + copy_if_one_unit(OutputType::Assembly, false); + } + OutputType::Object => { + user_wants_objects = true; + copy_if_one_unit(OutputType::Object, true); + } + OutputType::Mir | + OutputType::Metadata | + OutputType::Exe | + OutputType::DepInfo => {} } - OutputType::Object => { - user_wants_objects = true; - copy_if_one_unit(OutputType::Object, true); - } - OutputType::Mir | - OutputType::Metadata | - OutputType::Exe | - OutputType::DepInfo => {} } } let user_wants_bitcode = user_wants_bitcode; @@ -895,7 +909,7 @@ pub fn run_passes(sess: &Session, let keep_numbered_objects = needs_crate_object || (user_wants_objects && sess.opts.cg.codegen_units > 1); - for module_name in trans.modules.iter().map(|m| Some(&m.name[..])) { + for module_name in modules.iter().map(|m| Some(&m.name[..])) { if modules_config.emit_obj && !keep_numbered_objects { let path = crate_output.temp_path(OutputType::Object, module_name); remove(sess, &path); @@ -909,11 +923,11 @@ pub fn run_passes(sess: &Session, if metadata_config.emit_bc && !user_wants_bitcode { let path = crate_output.temp_path(OutputType::Bitcode, - Some(&trans.metadata_module.name)); + Some(&metadata_module.name)); remove(sess, &path); } if allocator_config.emit_bc && !user_wants_bitcode { - if let Some(ref module) = trans.allocator_module { + if let Some(ref module) = allocator_module { let path = crate_output.temp_path(OutputType::Bitcode, Some(&module.name)); remove(sess, &path); @@ -932,6 +946,14 @@ pub fn run_passes(sess: &Session, if sess.opts.cg.codegen_units == 1 && sess.time_llvm_passes() { unsafe { llvm::LLVMRustPrintPassTimings(); } } + + *trans.result.borrow_mut() = Some( + RunLLVMPassesResult { + modules, + metadata_module, + allocator_module, + } + ); } pub fn dump_incremental_data(trans: &CrateTranslation) { @@ -1011,10 +1033,7 @@ fn 
execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) pub enum Message { Token(io::Result), - Diagnostic(Diagnostic), Done { success: bool }, - InlineAsmError(u32, String), - AbortIfErrors, } pub struct Diagnostic { @@ -1048,6 +1067,8 @@ fn execute_work<'a>(sess: &'a Session, helper.request_token(); } + let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); + // This is the "main loop" of parallel work happening for parallel codegen. // It's here that we manage parallelism, schedule work, and work with // messages coming from clients. @@ -1112,7 +1133,13 @@ fn execute_work<'a>(sess: &'a Session, while work_items.len() > 0 && running < tokens.len() + 1 { let item = work_items.pop().unwrap(); let index = work_items.len(); - spawn_work(sess, exported_symbols, scope, tx.clone(), item, index); + spawn_work(sess, + exported_symbols, + scope, + tx.clone(), + shared_emitter.clone(), + item, + index); running += 1; } @@ -1140,70 +1167,22 @@ fn execute_work<'a>(sess: &'a Session, running -= 1; } Message::Done { success: false } => { - sess.fatal("aborting due to worker thread panic"); + shared_emitter.fatal("aborting due to worker thread panic".to_string()); } - - // Our worker wants us to emit an error message, so get ahold of our - // `sess` and print it out - Message::Diagnostic(diag) => { - let handler = sess.diagnostic(); - match diag.code { - Some(ref code) => { - handler.emit_with_code(&MultiSpan::new(), - &diag.msg, - &code, - diag.lvl); - } - None => { - handler.emit(&MultiSpan::new(), - &diag.msg, - diag.lvl); - } - } - } - Message::InlineAsmError(cookie, msg) => { - match Mark::from_u32(cookie).expn_info() { - Some(ei) => sess.span_err(ei.call_site, &msg), - None => sess.err(&msg), - } - } - - // Sent to us after a worker sends us a batch of error messages, and - // it's the point at which we check for errors. 
- Message::AbortIfErrors => sess.diagnostic().abort_if_errors(), } + + shared_emitter_main.check(sess); } // Just in case, check this on the way out. sess.diagnostic().abort_if_errors(); } -struct SharedEmitter { - tx: Sender, -} - -impl Emitter for SharedEmitter { - fn emit(&mut self, db: &DiagnosticBuilder) { - drop(self.tx.send(Message::Diagnostic(Diagnostic { - msg: db.message(), - code: db.code.clone(), - lvl: db.level, - }))); - for child in &db.children { - drop(self.tx.send(Message::Diagnostic(Diagnostic { - msg: child.message(), - code: None, - lvl: child.level, - }))); - } - drop(self.tx.send(Message::AbortIfErrors)); - } -} - fn spawn_work<'a>(sess: &'a Session, exported_symbols: &'a ExportedSymbols, scope: &Scope<'a>, tx: Sender, + emitter: SharedEmitter, work: WorkItem, idx: usize) { let plugin_passes = sess.plugin_llvm_passes.borrow().clone(); @@ -1244,8 +1223,8 @@ fn spawn_work<'a>(sess: &'a Session, // Set up our non-`Send` `CodegenContext` now that we're in a helper // thread and have all our info available to us. 
- let emitter = SharedEmitter { tx: tx.clone() }; - let diag_handler = Handler::with_emitter(true, false, Box::new(emitter)); + // let emitter = SharedEmitter { tx: tx.clone() }; + let diag_handler = Handler::with_emitter(true, false, Box::new(emitter.clone())); let cgcx = CodegenContext { crate_types: crate_types, @@ -1261,6 +1240,7 @@ fn spawn_work<'a>(sess: &'a Session, worker: idx, incr_comp_session_dir: incr_comp_session_dir, tx: tx.clone(), + shared_emitter: emitter, }; // Execute the work itself, and if it finishes successfully then flag @@ -1371,3 +1351,95 @@ pub unsafe fn with_llvm_pmb(llmod: ModuleRef, f(builder); llvm::LLVMPassManagerBuilderDispose(builder); } + + +enum SharedEmitterMessage { + Diagnostic(Diagnostic), + InlineAsmError(u32, String), + AbortIfErrors, + Fatal(String), +} + +#[derive(Clone)] +pub struct SharedEmitter { + sender: Sender, +} + +pub struct SharedEmitterMain { + receiver: Receiver, +} + +impl SharedEmitter { + pub fn new() -> (SharedEmitter, SharedEmitterMain) { + let (sender, receiver) = channel(); + + (SharedEmitter { sender }, SharedEmitterMain { receiver }) + } + + fn inline_asm_error(&self, cookie: u32, msg: String) { + drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); + } + + fn fatal(&self, msg: String) { + drop(self.sender.send(SharedEmitterMessage::Fatal(msg))); + } +} + +impl Emitter for SharedEmitter { + fn emit(&mut self, db: &DiagnosticBuilder) { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: db.message(), + code: db.code.clone(), + lvl: db.level, + }))); + for child in &db.children { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: child.message(), + code: None, + lvl: child.level, + }))); + } + drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); + } +} + +impl SharedEmitterMain { + pub fn check(&self, sess: &Session) { + loop { + match self.receiver.try_recv() { + Ok(SharedEmitterMessage::Diagnostic(diag)) => { + let 
handler = sess.diagnostic(); + match diag.code { + Some(ref code) => { + handler.emit_with_code(&MultiSpan::new(), + &diag.msg, + &code, + diag.lvl); + } + None => { + handler.emit(&MultiSpan::new(), + &diag.msg, + diag.lvl); + } + } + } + Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { + match Mark::from_u32(cookie).expn_info() { + Some(ei) => sess.span_err(ei.call_site, &msg), + None => sess.err(&msg), + } + } + Ok(SharedEmitterMessage::AbortIfErrors) => { + sess.abort_if_errors(); + } + Ok(SharedEmitterMessage::Fatal(msg)) => { + sess.fatal(&msg); + } + Err(_) => { + break; + } + } + + } + } +} diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 1fd871d31b59..5582079c78f1 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -963,9 +963,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let linker_info = LinkerInfo::new(&shared_ccx, &empty_exported_symbols); return OngoingCrateTranslation { crate_name: tcx.crate_name(LOCAL_CRATE), - modules: vec![], - metadata_module: metadata_module, - allocator_module: None, link: link_meta, metadata: metadata, exported_symbols: Arc::new(empty_exported_symbols), @@ -973,6 +970,11 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, linker_info: linker_info, windows_subsystem: None, no_integrated_as: false, + result: ::std::cell::RefCell::new(Some(::back::write::RunLLVMPassesResult { + modules: vec![], + metadata_module: metadata_module, + allocator_module: None, + })), }; } @@ -1228,9 +1230,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, windows_subsystem, no_integrated_as, - modules, - metadata_module, - allocator_module, + result: ::std::cell::RefCell::new(None), }; time(sess.time_passes(), @@ -1249,11 +1249,23 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let output_types = OutputTypes::new(&[(OutputType::Assembly, None)]); time(sess.time_passes(), "LLVM passes", - || ::back::write::run_passes(sess, &crate_translation, 
&output_types, outputs)) + || ::back::write::run_passes(sess, + &crate_translation, + modules, + metadata_module, + allocator_module, + &output_types, + outputs)) } else { time(sess.time_passes(), "LLVM passes", - || ::back::write::run_passes(sess, &crate_translation, &sess.opts.output_types, outputs)) + || ::back::write::run_passes(sess, + &crate_translation, + modules, + metadata_module, + allocator_module, + &sess.opts.output_types, + outputs)) }; crate_translation diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index c386d11fa84c..e7debe3919b4 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -188,10 +188,8 @@ pub struct OngoingCrateTranslation { pub linker_info: back::linker::LinkerInfo, pub no_integrated_as: bool, - // These will be replaced by a Future. - pub modules: Vec, - pub metadata_module: ModuleTranslation, - pub allocator_module: Option, + // This will be replaced by a Future. + pub result: ::std::cell::RefCell>, } impl OngoingCrateTranslation { @@ -200,6 +198,8 @@ impl OngoingCrateTranslation { outputs: &OutputFilenames) -> CrateTranslation { + let result = self.result.borrow_mut().take().unwrap(); + let trans = CrateTranslation { crate_name: self.crate_name, link: self.link, @@ -209,9 +209,9 @@ impl OngoingCrateTranslation { windows_subsystem: self.windows_subsystem, linker_info: self.linker_info, - modules: self.modules, - metadata_module: self.metadata_module, - allocator_module: self.allocator_module, + modules: result.modules, + metadata_module: result.metadata_module, + allocator_module: result.allocator_module, }; if self.no_integrated_as { From bac57cf65430ce52bfa6a50e7f4db1b99c02d1cb Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Mon, 24 Jul 2017 13:54:18 +0200 Subject: [PATCH 081/213] async-llvm(3): Make write::CodegenContext Clone and Send. 
--- src/librustc_trans/back/lto.rs | 26 +++--- src/librustc_trans/back/write.rs | 134 +++++++++++++++---------------- 2 files changed, 78 insertions(+), 82 deletions(-) diff --git a/src/librustc_trans/back/lto.rs b/src/librustc_trans/back/lto.rs index feed127b0b60..e160d6b6c6ab 100644 --- a/src/librustc_trans/back/lto.rs +++ b/src/librustc_trans/back/lto.rs @@ -12,7 +12,7 @@ use back::link; use back::write; use back::symbol_export; use rustc::session::config; -use errors::FatalError; +use errors::{FatalError, Handler}; use llvm; use llvm::archive_ro::ArchiveRO; use llvm::{ModuleRef, TargetMachineRef, True, False}; @@ -41,24 +41,24 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { } pub fn run(cgcx: &CodegenContext, + diag_handler: &Handler, llmod: ModuleRef, tm: TargetMachineRef, config: &ModuleConfig, temp_no_opt_bc_filename: &Path) -> Result<(), FatalError> { - let handler = cgcx.handler; if cgcx.opts.cg.prefer_dynamic { - handler.struct_err("cannot prefer dynamic linking when performing LTO") - .note("only 'staticlib', 'bin', and 'cdylib' outputs are \ - supported with LTO") - .emit(); + diag_handler.struct_err("cannot prefer dynamic linking when performing LTO") + .note("only 'staticlib', 'bin', and 'cdylib' outputs are \ + supported with LTO") + .emit(); return Err(FatalError) } // Make sure we actually can run LTO for crate_type in cgcx.crate_types.iter() { if !crate_type_allows_lto(*crate_type) { - let e = handler.fatal("lto can only be run for executables, cdylibs and \ - static library outputs"); + let e = diag_handler.fatal("lto can only be run for executables, cdylibs and \ + static library outputs"); return Err(e) } } @@ -116,13 +116,13 @@ pub fn run(cgcx: &CodegenContext, if res.is_err() { let msg = format!("failed to decompress bc of `{}`", name); - Err(handler.fatal(&msg)) + Err(diag_handler.fatal(&msg)) } else { Ok(inflated) } } else { - Err(handler.fatal(&format!("Unsupported bytecode format version {}", - version))) + 
Err(diag_handler.fatal(&format!("Unsupported bytecode format version {}", + version))) } })? } else { @@ -136,7 +136,7 @@ pub fn run(cgcx: &CodegenContext, if res.is_err() { let msg = format!("failed to decompress bc of `{}`", name); - Err(handler.fatal(&msg)) + Err(diag_handler.fatal(&msg)) } else { Ok(inflated) } @@ -152,7 +152,7 @@ pub fn run(cgcx: &CodegenContext, Ok(()) } else { let msg = format!("failed to load bc of `{}`", name); - Err(write::llvm_err(handler, msg)) + Err(write::llvm_err(&diag_handler, msg)) } })?; } diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 28d255d42045..d2fc96828527 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -282,6 +282,7 @@ impl ModuleConfig { } /// Additional resources used by optimize_and_codegen (not module specific) +#[derive(Clone)] pub struct CodegenContext<'a> { // Resouces needed when running LTO pub time_passes: bool, @@ -292,7 +293,7 @@ pub struct CodegenContext<'a> { pub crate_types: Vec, pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, // Handler to use for diagnostics produced during codegen. - pub handler: &'a Handler, + pub diag_emitter: SharedEmitter, // LLVM passes added by plugins. pub plugin_passes: Vec, // LLVM optimizations for which we want to print remarks. @@ -303,20 +304,24 @@ pub struct CodegenContext<'a> { // compiling incrementally pub incr_comp_session_dir: Option, // Channel back to the main control thread to send messages to - pub tx: Sender, + pub coordinator_send: Sender, +} - // Error messages... 
- pub shared_emitter: SharedEmitter, +impl<'a> CodegenContext<'a> { + fn create_diag_handler(&self) -> Handler { + Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) + } } struct HandlerFreeVars<'a> { cgcx: &'a CodegenContext<'a>, + diag_handler: &'a Handler, } unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext<'a>, msg: &'b str, cookie: c_uint) { - cgcx.shared_emitter.inline_asm_error(cookie as u32, msg.to_string()); + cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_string()); } unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef, @@ -331,7 +336,7 @@ unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef, } unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_void) { - let HandlerFreeVars { cgcx, .. } = *(user as *const HandlerFreeVars); + let HandlerFreeVars { cgcx, diag_handler, .. } = *(user as *const HandlerFreeVars); match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { @@ -347,7 +352,7 @@ unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_vo }; if enabled { - cgcx.handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}", + diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}", opt.kind.describe(), opt.pass_name, opt.filename, @@ -363,6 +368,7 @@ unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_vo // Unsafe due to LLVM calls. 
unsafe fn optimize_and_codegen(cgcx: &CodegenContext, + diag_handler: &Handler, mtrans: ModuleTranslation, mllvm: ModuleLlvm, config: ModuleConfig, @@ -375,6 +381,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, let fv = HandlerFreeVars { cgcx: cgcx, + diag_handler: diag_handler, }; let fv = &fv as *const HandlerFreeVars as *mut c_void; @@ -409,7 +416,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, llvm::PassKind::Function => fpm, llvm::PassKind::Module => mpm, llvm::PassKind::Other => { - cgcx.handler.err("Encountered LLVM pass kind we can't handle"); + diag_handler.err("Encountered LLVM pass kind we can't handle"); return true }, }; @@ -429,20 +436,20 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, for pass in &config.passes { if !addpass(pass) { - cgcx.handler.warn(&format!("unknown pass `{}`, ignoring", + diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass)); } } for pass in &cgcx.plugin_passes { if !addpass(pass) { - cgcx.handler.err(&format!("a plugin asked for LLVM pass \ + diag_handler.err(&format!("a plugin asked for LLVM pass \ `{}` but LLVM does not \ recognize it", pass)); } } - cgcx.handler.abort_if_errors(); + diag_handler.abort_if_errors(); // Finally, run the actual optimization passes time(config.time_passes, &format!("llvm function passes [{}]", cgcx.worker), || @@ -459,6 +466,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, let temp_no_opt_bc_filename = output_names.temp_path_ext("no-opt.lto.bc", module_name); lto::run(cgcx, + diag_handler, llmod, tm, &config, @@ -564,7 +572,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, llmod }; with_codegen(tm, llmod, config.no_builtins, |cpm| { - write_output_file(cgcx.handler, tm, cpm, llmod, &path, + write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile) })?; if config.emit_obj { @@ -574,7 +582,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, if write_obj { with_codegen(tm, llmod, config.no_builtins, |cpm| 
{ - write_output_file(cgcx.handler, tm, cpm, llmod, &obj_out, + write_output_file(diag_handler, tm, cpm, llmod, &obj_out, llvm::FileType::ObjectFile) })?; } @@ -585,14 +593,14 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, if copy_bc_to_obj { debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out); if let Err(e) = link_or_copy(&bc_out, &obj_out) { - cgcx.handler.err(&format!("failed to copy bitcode to object file: {}", e)); + diag_handler.err(&format!("failed to copy bitcode to object file: {}", e)); } } if rm_bc { debug!("removing_bitcode {:?}", bc_out); if let Err(e) = fs::remove_file(&bc_out) { - cgcx.handler.err(&format!("failed to remove bitcode: {}", e)); + diag_handler.err(&format!("failed to remove bitcode: {}", e)); } } @@ -991,11 +999,13 @@ fn build_work_item(sess: &Session, fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) -> Result<(), FatalError> { + let diag_handler = cgcx.create_diag_handler(); unsafe { match work_item.mtrans.source { ModuleSource::Translated(mllvm) => { debug!("llvm-optimizing {:?}", work_item.mtrans.name); optimize_and_codegen(cgcx, + &diag_handler, work_item.mtrans, mllvm, work_item.config, @@ -1017,7 +1027,7 @@ fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) match link_or_copy(&source_file, &obj_out) { Ok(_) => { } Err(err) => { - cgcx.handler.err(&format!("unable to copy {} to {}: {}", + diag_handler.err(&format!("unable to copy {} to {}: {}", source_file.display(), obj_out.display(), err)); @@ -1069,6 +1079,30 @@ fn execute_work<'a>(sess: &'a Session, let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); + let mut each_linked_rlib_for_lto = Vec::new(); + drop(link::each_linked_rlib(sess, &mut |cnum, path| { + if link::ignored_for_lto(sess, cnum) { + return + } + each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); + })); + + let cgcx = CodegenContext { + crate_types: sess.crate_types.borrow().clone(), + each_linked_rlib_for_lto: each_linked_rlib_for_lto, + lto: 
sess.lto(), + no_landing_pads: sess.no_landing_pads(), + opts: &sess.opts, + time_passes: sess.time_passes(), + exported_symbols: exported_symbols, + plugin_passes: sess.plugin_llvm_passes.borrow().clone(), + remark: sess.opts.cg.remark.clone(), + worker: 0, + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), + coordinator_send: tx.clone(), + diag_emitter: shared_emitter.clone(), + }; + // This is the "main loop" of parallel work happening for parallel codegen. // It's here that we manage parallelism, schedule work, and work with // messages coming from clients. @@ -1132,14 +1166,16 @@ fn execute_work<'a>(sess: &'a Session, // parallelism slots and work left to spawn. while work_items.len() > 0 && running < tokens.len() + 1 { let item = work_items.pop().unwrap(); - let index = work_items.len(); - spawn_work(sess, - exported_symbols, + let worker_index = work_items.len(); + + let cgcx = CodegenContext { + worker: worker_index, + .. cgcx.clone() + }; + + spawn_work(cgcx, scope, - tx.clone(), - shared_emitter.clone(), - item, - index); + item); running += 1; } @@ -1178,29 +1214,10 @@ fn execute_work<'a>(sess: &'a Session, sess.diagnostic().abort_if_errors(); } -fn spawn_work<'a>(sess: &'a Session, - exported_symbols: &'a ExportedSymbols, +fn spawn_work<'a>(cgcx: CodegenContext<'a>, scope: &Scope<'a>, - tx: Sender, - emitter: SharedEmitter, - work: WorkItem, - idx: usize) { - let plugin_passes = sess.plugin_llvm_passes.borrow().clone(); - let remark = sess.opts.cg.remark.clone(); - let incr_comp_session_dir = sess.incr_comp_session_dir_opt().map(|r| r.clone()); + work: WorkItem) { let depth = time_depth(); - let lto = sess.lto(); - let crate_types = sess.crate_types.borrow().clone(); - let mut each_linked_rlib_for_lto = Vec::new(); - drop(link::each_linked_rlib(sess, &mut |cnum, path| { - if link::ignored_for_lto(sess, cnum) { - return - } - each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); - })); - let time_passes = 
sess.time_passes(); - let no_landing_pads = sess.no_landing_pads(); - let opts = &sess.opts; scope.spawn(move || { set_time_depth(depth); @@ -1208,41 +1225,20 @@ fn spawn_work<'a>(sess: &'a Session, // Set up a destructor which will fire off a message that we're done as // we exit. struct Bomb { - tx: Sender, + coordinator_send: Sender, success: bool, } impl Drop for Bomb { fn drop(&mut self) { - drop(self.tx.send(Message::Done { success: self.success })); + drop(self.coordinator_send.send(Message::Done { success: self.success })); } } + let mut bomb = Bomb { - tx: tx.clone(), + coordinator_send: cgcx.coordinator_send.clone(), success: false, }; - // Set up our non-`Send` `CodegenContext` now that we're in a helper - // thread and have all our info available to us. - // let emitter = SharedEmitter { tx: tx.clone() }; - let diag_handler = Handler::with_emitter(true, false, Box::new(emitter.clone())); - - let cgcx = CodegenContext { - crate_types: crate_types, - each_linked_rlib_for_lto: each_linked_rlib_for_lto, - lto: lto, - no_landing_pads: no_landing_pads, - opts: opts, - time_passes: time_passes, - exported_symbols: exported_symbols, - handler: &diag_handler, - plugin_passes: plugin_passes, - remark: remark, - worker: idx, - incr_comp_session_dir: incr_comp_session_dir, - tx: tx.clone(), - shared_emitter: emitter, - }; - // Execute the work itself, and if it finishes successfully then flag // ourselves as a success as well. // From df6be33d84f14c286689938eb2a2686315926e9f Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Mon, 24 Jul 2017 14:21:28 +0200 Subject: [PATCH 082/213] async-llvm(4): Move work coordination to separate thread in order to free up the main thread for translation. 
--- src/librustc_trans/back/write.rs | 122 +++++++++++++++---------------- 1 file changed, 60 insertions(+), 62 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index d2fc96828527..08eccd8fdf3a 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -28,7 +28,6 @@ use syntax::ext::hygiene::Mark; use syntax_pos::MultiSpan; use context::{is_pie_binary, get_reloc_model}; use jobserver::{Client, Acquired}; -use crossbeam::{scope, Scope}; use rustc_demangle; use std::cmp; @@ -38,8 +37,10 @@ use std::io; use std::io::Write; use std::path::{Path, PathBuf}; use std::str; +use std::sync::Arc; use std::sync::mpsc::{channel, Sender, Receiver}; use std::slice; +use std::thread; use libc::{c_uint, c_void, c_char, size_t}; pub const RELOC_MODEL_ARGS : [(&'static str, llvm::RelocMode); 7] = [ @@ -283,13 +284,13 @@ impl ModuleConfig { /// Additional resources used by optimize_and_codegen (not module specific) #[derive(Clone)] -pub struct CodegenContext<'a> { +pub struct CodegenContext { // Resouces needed when running LTO pub time_passes: bool, pub lto: bool, pub no_landing_pads: bool, - pub exported_symbols: &'a ExportedSymbols, - pub opts: &'a config::Options, + pub exported_symbols: Arc, + pub opts: Arc, pub crate_types: Vec, pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, // Handler to use for diagnostics produced during codegen. 
@@ -307,18 +308,18 @@ pub struct CodegenContext<'a> { pub coordinator_send: Sender, } -impl<'a> CodegenContext<'a> { +impl CodegenContext { fn create_diag_handler(&self) -> Handler { Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) } } struct HandlerFreeVars<'a> { - cgcx: &'a CodegenContext<'a>, + cgcx: &'a CodegenContext, diag_handler: &'a Handler, } -unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext<'a>, +unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, msg: &'b str, cookie: c_uint) { cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_string()); @@ -775,9 +776,8 @@ pub fn run_passes(sess: &Session, let num_workers = cmp::min(work_items.len() - 1, 32); Client::new(num_workers).expect("failed to create jobserver") }); - scope(|scope| { - execute_work(sess, work_items, client, &trans.exported_symbols, scope); - }); + + execute_work(sess, work_items, client, trans.exported_symbols.clone()); // If in incr. comp. mode, preserve the `.o` files for potential re-use for mtrans in modules.iter() { @@ -1052,11 +1052,10 @@ pub struct Diagnostic { lvl: Level, } -fn execute_work<'a>(sess: &'a Session, - mut work_items: Vec, - jobserver: Client, - exported_symbols: &'a ExportedSymbols, - scope: &Scope<'a>) { +fn execute_work(sess: &Session, + mut work_items: Vec, + jobserver: Client, + exported_symbols: Arc) { let (tx, rx) = channel(); let tx2 = tx.clone(); @@ -1092,7 +1091,7 @@ fn execute_work<'a>(sess: &'a Session, each_linked_rlib_for_lto: each_linked_rlib_for_lto, lto: sess.lto(), no_landing_pads: sess.no_landing_pads(), - opts: &sess.opts, + opts: Arc::new(sess.opts.clone()), time_passes: sess.time_passes(), exported_symbols: exported_symbols, plugin_passes: sess.plugin_llvm_passes.borrow().clone(), @@ -1158,68 +1157,67 @@ fn execute_work<'a>(sess: &'a Session, // Before that work finishes, however, we may acquire a token. 
In that case // we actually wastefully acquired the token, so we relinquish it back to // the jobserver. - let mut tokens = Vec::new(); - let mut running = 0; - while work_items.len() > 0 || running > 0 { - // Spin up what work we can, only doing this while we've got available - // parallelism slots and work left to spawn. - while work_items.len() > 0 && running < tokens.len() + 1 { - let item = work_items.pop().unwrap(); - let worker_index = work_items.len(); + thread::spawn(move || { + let mut tokens = Vec::new(); + let mut running = 0; + while work_items.len() > 0 || running > 0 { - let cgcx = CodegenContext { - worker: worker_index, - .. cgcx.clone() - }; + // Spin up what work we can, only doing this while we've got available + // parallelism slots and work left to spawn. + while work_items.len() > 0 && running < tokens.len() + 1 { + let item = work_items.pop().unwrap(); + let worker_index = work_items.len(); - spawn_work(cgcx, - scope, - item); - running += 1; - } + let cgcx = CodegenContext { + worker: worker_index, + .. cgcx.clone() + }; - // Relinquish accidentally acquired extra tokens - tokens.truncate(running.saturating_sub(1)); - - match rx.recv().unwrap() { - // Save the token locally and the next turn of the loop will use - // this to spawn a new unit of work, or it may get dropped - // immediately if we have no more work to spawn. - Message::Token(token) => { - tokens.push(token.expect("failed to acquire jobserver token")); + spawn_work(cgcx, item); + running += 1; } - // If a thread exits successfully then we drop a token associated - // with that worker and update our `running` count. We may later - // re-acquire a token to continue running more work. We may also not - // actually drop a token here if the worker was running with an - // "ephemeral token" - // - // Note that if the thread failed that means it panicked, so we - // abort immediately. 
- Message::Done { success: true } => { - drop(tokens.pop()); - running -= 1; - } - Message::Done { success: false } => { - shared_emitter.fatal("aborting due to worker thread panic".to_string()); + // Relinquish accidentally acquired extra tokens + tokens.truncate(running.saturating_sub(1)); + + match rx.recv().unwrap() { + // Save the token locally and the next turn of the loop will use + // this to spawn a new unit of work, or it may get dropped + // immediately if we have no more work to spawn. + Message::Token(token) => { + tokens.push(token.expect("failed to acquire jobserver token")); + } + + // If a thread exits successfully then we drop a token associated + // with that worker and update our `running` count. We may later + // re-acquire a token to continue running more work. We may also not + // actually drop a token here if the worker was running with an + // "ephemeral token" + // + // Note that if the thread failed that means it panicked, so we + // abort immediately. + Message::Done { success: true } => { + drop(tokens.pop()); + running -= 1; + } + Message::Done { success: false } => { + shared_emitter.fatal("aborting due to worker thread panic".to_string()); + } } } + }).join().unwrap(); - shared_emitter_main.check(sess); - } + shared_emitter_main.check(sess); // Just in case, check this on the way out. sess.diagnostic().abort_if_errors(); } -fn spawn_work<'a>(cgcx: CodegenContext<'a>, - scope: &Scope<'a>, - work: WorkItem) { +fn spawn_work(cgcx: CodegenContext, work: WorkItem) { let depth = time_depth(); - scope.spawn(move || { + thread::spawn(move || { set_time_depth(depth); // Set up a destructor which will fire off a message that we're done as From b18a61a15ba5c267f7b25edebf3ff4aa7c3896f6 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Mon, 24 Jul 2017 14:51:00 +0200 Subject: [PATCH 083/213] async-llvm(5): Do continuous error handling on main thread. 
--- src/librustc_trans/back/write.rs | 69 +++++++++++++++++++++++++------- 1 file changed, 55 insertions(+), 14 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 08eccd8fdf3a..9e4c1b87aacc 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -32,6 +32,7 @@ use rustc_demangle; use std::cmp; use std::ffi::CString; +use std::fmt; use std::fs; use std::io; use std::io::Write; @@ -777,7 +778,33 @@ pub fn run_passes(sess: &Session, Client::new(num_workers).expect("failed to create jobserver") }); - execute_work(sess, work_items, client, trans.exported_symbols.clone()); + let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); + let (trans_worker_send, trans_worker_receive) = channel(); + + let coordinator_thread = start_executing_work(sess, + work_items, + shared_emitter, + trans_worker_send, + client, + trans.exported_symbols.clone()); + loop { + shared_emitter_main.check(sess); + + match trans_worker_receive.recv() { + Ok(Message::AllWorkDone) | + Err(_) => break, + + Ok(Message::CheckErrorMessages) => continue, + Ok(msg) => { + bug!("unexpected message {:?}", msg); + } + } + } + + coordinator_thread.join().unwrap(); + + // Just in case, check this on the way out. + sess.diagnostic().abort_if_errors(); // If in incr. comp. 
mode, preserve the `.o` files for potential re-use for mtrans in modules.iter() { @@ -975,12 +1002,18 @@ pub fn dump_incremental_data(trans: &CrateTranslation) { eprintln!("incremental: re-using {} out of {} modules", reuse, trans.modules.len()); } -struct WorkItem { +pub struct WorkItem { mtrans: ModuleTranslation, config: ModuleConfig, output_names: OutputFilenames } +impl fmt::Debug for WorkItem { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "WorkItem({})", self.mtrans.name) + } +} + fn build_work_item(sess: &Session, mtrans: ModuleTranslation, config: ModuleConfig, @@ -1041,21 +1074,29 @@ fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) Ok(()) } +#[derive(Debug)] pub enum Message { Token(io::Result), Done { success: bool }, + WorkItem(WorkItem), + CheckErrorMessages, + AllWorkDone, } + pub struct Diagnostic { msg: String, code: Option, lvl: Level, } -fn execute_work(sess: &Session, - mut work_items: Vec, - jobserver: Client, - exported_symbols: Arc) { +fn start_executing_work(sess: &Session, + mut work_items: Vec, + shared_emitter: SharedEmitter, + trans_worker_send: Sender, + jobserver: Client, + exported_symbols: Arc) + -> thread::JoinHandle<()> { let (tx, rx) = channel(); let tx2 = tx.clone(); @@ -1076,8 +1117,6 @@ fn execute_work(sess: &Session, helper.request_token(); } - let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); - let mut each_linked_rlib_for_lto = Vec::new(); drop(link::each_linked_rlib(sess, &mut |cnum, path| { if link::ignored_for_lto(sess, cnum) { @@ -1200,18 +1239,20 @@ fn execute_work(sess: &Session, Message::Done { success: true } => { drop(tokens.pop()); running -= 1; + trans_worker_send.send(Message::CheckErrorMessages).unwrap(); } Message::Done { success: false } => { shared_emitter.fatal("aborting due to worker thread panic".to_string()); + trans_worker_send.send(Message::CheckErrorMessages).unwrap(); + } + msg @ Message::WorkItem(_) | + msg @ Message::AllWorkDone | + msg @ 
Message::CheckErrorMessages => { + bug!("unexpected message: {:?}", msg); } } } - }).join().unwrap(); - - shared_emitter_main.check(sess); - - // Just in case, check this on the way out. - sess.diagnostic().abort_if_errors(); + }) } fn spawn_work(cgcx: CodegenContext, work: WorkItem) { From 8f6894e177cecf3cd35833e2063256a69841415a Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Mon, 24 Jul 2017 15:50:42 +0200 Subject: [PATCH 084/213] async-llvm(6): Make the LLVM work coordinator get its work package through a channel instead of upfront. --- src/librustc_trans/back/write.rs | 77 +++++++++++++++++++++----------- 1 file changed, 51 insertions(+), 26 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 9e4c1b87aacc..ee3c9ace7dc1 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -780,19 +780,31 @@ pub fn run_passes(sess: &Session, let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); let (trans_worker_send, trans_worker_receive) = channel(); + let (coordinator_send, coordinator_receive) = channel(); let coordinator_thread = start_executing_work(sess, - work_items, + work_items.len(), shared_emitter, trans_worker_send, + coordinator_send.clone(), + coordinator_receive, client, trans.exported_symbols.clone()); + for work_item in work_items { + coordinator_send.send(Message::WorkItem(work_item)).unwrap(); + } + loop { shared_emitter_main.check(sess); match trans_worker_receive.recv() { - Ok(Message::AllWorkDone) | - Err(_) => break, + Err(_) => { + // An `Err` here means that all senders for this channel have + // been closed. This could happen because all work has + // completed successfully or there has been some error. + // At this point we don't care which it is. 
+ break + } Ok(Message::CheckErrorMessages) => continue, Ok(msg) => { @@ -801,9 +813,15 @@ pub fn run_passes(sess: &Session, } } - coordinator_thread.join().unwrap(); + match coordinator_thread.join() { + Ok(()) => {}, + Err(err) => { + panic!("error: {:?}", err); + } + } // Just in case, check this on the way out. + shared_emitter_main.check(sess); sess.diagnostic().abort_if_errors(); // If in incr. comp. mode, preserve the `.o` files for potential re-use @@ -1080,7 +1098,6 @@ pub enum Message { Done { success: bool }, WorkItem(WorkItem), CheckErrorMessages, - AllWorkDone, } @@ -1091,15 +1108,14 @@ pub struct Diagnostic { } fn start_executing_work(sess: &Session, - mut work_items: Vec, + total_work_item_count: usize, shared_emitter: SharedEmitter, trans_worker_send: Sender, + coordinator_send: Sender, + coordinator_receive: Receiver, jobserver: Client, exported_symbols: Arc) - -> thread::JoinHandle<()> { - let (tx, rx) = channel(); - let tx2 = tx.clone(); - + -> thread::JoinHandle<()> { // First up, convert our jobserver into a helper thread so we can use normal // mpsc channels to manage our messages and such. Once we've got the helper // thread then request `n-1` tokens because all of our work items are ready @@ -1110,10 +1126,11 @@ fn start_executing_work(sess: &Session, // // After we've requested all these tokens then we'll, when we can, get // tokens on `rx` above which will get managed in the main loop below. 
+ let coordinator_send2 = coordinator_send.clone(); let helper = jobserver.into_helper_thread(move |token| { - drop(tx2.send(Message::Token(token))); + drop(coordinator_send2.send(Message::Token(token))); }).expect("failed to spawn helper thread"); - for _ in 0..work_items.len() - 1 { + for _ in 0..total_work_item_count - 1 { helper.request_token(); } @@ -1137,7 +1154,7 @@ fn start_executing_work(sess: &Session, remark: sess.opts.cg.remark.clone(), worker: 0, incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), - coordinator_send: tx.clone(), + coordinator_send: coordinator_send, diag_emitter: shared_emitter.clone(), }; @@ -1198,29 +1215,35 @@ fn start_executing_work(sess: &Session, // the jobserver. thread::spawn(move || { + let mut work_items_left = total_work_item_count; + let mut work_items = Vec::with_capacity(total_work_item_count); let mut tokens = Vec::new(); let mut running = 0; - while work_items.len() > 0 || running > 0 { + while work_items_left > 0 || running > 0 { // Spin up what work we can, only doing this while we've got available // parallelism slots and work left to spawn. - while work_items.len() > 0 && running < tokens.len() + 1 { - let item = work_items.pop().unwrap(); - let worker_index = work_items.len(); + while work_items_left > 0 && running < tokens.len() + 1 { + if let Some(item) = work_items.pop() { + work_items_left -= 1; + let worker_index = work_items_left; - let cgcx = CodegenContext { - worker: worker_index, - .. cgcx.clone() - }; + let cgcx = CodegenContext { + worker: worker_index, + .. 
cgcx.clone() + }; - spawn_work(cgcx, item); - running += 1; + spawn_work(cgcx, item); + running += 1; + } else { + break + } } // Relinquish accidentally acquired extra tokens tokens.truncate(running.saturating_sub(1)); - match rx.recv().unwrap() { + match coordinator_receive.recv().unwrap() { // Save the token locally and the next turn of the loop will use // this to spawn a new unit of work, or it may get dropped // immediately if we have no more work to spawn. @@ -1228,6 +1251,10 @@ fn start_executing_work(sess: &Session, tokens.push(token.expect("failed to acquire jobserver token")); } + Message::WorkItem(work_item) => { + work_items.push(work_item); + } + // If a thread exits successfully then we drop a token associated // with that worker and update our `running` count. We may later // re-acquire a token to continue running more work. We may also not @@ -1245,8 +1272,6 @@ fn start_executing_work(sess: &Session, shared_emitter.fatal("aborting due to worker thread panic".to_string()); trans_worker_send.send(Message::CheckErrorMessages).unwrap(); } - msg @ Message::WorkItem(_) | - msg @ Message::AllWorkDone | msg @ Message::CheckErrorMessages => { bug!("unexpected message: {:?}", msg); } From 4282dd87ea79ea7836978e0b45514aea35dc1d0d Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Mon, 24 Jul 2017 16:18:11 +0200 Subject: [PATCH 085/213] async-llvm(7): Clean up error handling a bit. --- src/librustc_trans/back/write.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index ee3c9ace7dc1..5d9444218c4d 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -1248,7 +1248,13 @@ fn start_executing_work(sess: &Session, // this to spawn a new unit of work, or it may get dropped // immediately if we have no more work to spawn. 
Message::Token(token) => { - tokens.push(token.expect("failed to acquire jobserver token")); + if let Ok(token) = token { + tokens.push(token); + } else { + shared_emitter.fatal("failed to acquire jobserver token"); + drop(trans_worker_send.send(Message::CheckErrorMessages)); + return + } } Message::WorkItem(work_item) => { @@ -1266,11 +1272,12 @@ fn start_executing_work(sess: &Session, Message::Done { success: true } => { drop(tokens.pop()); running -= 1; - trans_worker_send.send(Message::CheckErrorMessages).unwrap(); + drop(trans_worker_send.send(Message::CheckErrorMessages)); } Message::Done { success: false } => { - shared_emitter.fatal("aborting due to worker thread panic".to_string()); - trans_worker_send.send(Message::CheckErrorMessages).unwrap(); + shared_emitter.fatal("aborting due to worker thread panic"); + drop(trans_worker_send.send(Message::CheckErrorMessages)); + return } msg @ Message::CheckErrorMessages => { bug!("unexpected message: {:?}", msg); @@ -1440,8 +1447,8 @@ impl SharedEmitter { drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); } - fn fatal(&self, msg: String) { - drop(self.sender.send(SharedEmitterMessage::Fatal(msg))); + fn fatal(&self, msg: &str) { + drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); } } From 645841ea446ed0772835f07fb62aaeecdb06b604 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Tue, 25 Jul 2017 17:26:24 +0200 Subject: [PATCH 086/213] async-llvm(8): Clean up resource management and drop LLVM modules ASAP. 
--- src/librustc_driver/driver.rs | 2 - src/librustc_trans/back/write.rs | 315 ++++++++++++++++++------------- src/librustc_trans/base.rs | 27 ++- src/librustc_trans/lib.rs | 63 ++++++- 4 files changed, 256 insertions(+), 151 deletions(-) diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 1bc3f59ed047..ff9f666af75d 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -237,8 +237,6 @@ pub fn compile_input(sess: &Session, phase5_result); phase5_result?; - write::cleanup_llvm(&trans); - phase_6_link_output(sess, &trans, &outputs); // Now that we won't touch anything in the incremental compilation directory diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 5d9444218c4d..83f7f574493e 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -18,7 +18,8 @@ use rustc::session::Session; use llvm; use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef}; use llvm::SMDiagnosticRef; -use {CrateTranslation, OngoingCrateTranslation, ModuleLlvm, ModuleSource, ModuleTranslation}; +use {CrateTranslation, OngoingCrateTranslation, ModuleSource, ModuleTranslation, + CompiledModule, ModuleKind}; use rustc::hir::def_id::CrateNum; use rustc::util::common::{time, time_depth, set_time_depth, path2cstr}; use rustc::util::fs::link_or_copy; @@ -192,7 +193,6 @@ pub fn create_target_machine(sess: &Session) -> TargetMachineRef { /// Module-specific configuration for `optimize_and_codegen`. -#[derive(Clone)] pub struct ModuleConfig { /// LLVM TargetMachine to use for codegen. 
tm: TargetMachineRef, @@ -231,9 +231,9 @@ pub struct ModuleConfig { unsafe impl Send for ModuleConfig { } impl ModuleConfig { - fn new(tm: TargetMachineRef, passes: Vec) -> ModuleConfig { + fn new(sess: &Session, passes: Vec) -> ModuleConfig { ModuleConfig { - tm: tm, + tm: create_target_machine(sess), passes: passes, opt_level: None, opt_size: None, @@ -281,6 +281,40 @@ impl ModuleConfig { self.merge_functions = sess.opts.optimize == config::OptLevel::Default || sess.opts.optimize == config::OptLevel::Aggressive; } + + fn clone(&self, sess: &Session) -> ModuleConfig { + ModuleConfig { + tm: create_target_machine(sess), + passes: self.passes.clone(), + opt_level: self.opt_level, + opt_size: self.opt_size, + + emit_no_opt_bc: self.emit_no_opt_bc, + emit_bc: self.emit_bc, + emit_lto_bc: self.emit_lto_bc, + emit_ir: self.emit_ir, + emit_asm: self.emit_asm, + emit_obj: self.emit_obj, + obj_is_bitcode: self.obj_is_bitcode, + + no_verify: self.no_verify, + no_prepopulate_passes: self.no_prepopulate_passes, + no_builtins: self.no_builtins, + time_passes: self.time_passes, + vectorize_loop: self.vectorize_loop, + vectorize_slp: self.vectorize_slp, + merge_functions: self.merge_functions, + inline_threshold: self.inline_threshold, + } + } +} + +impl Drop for ModuleConfig { + fn drop(&mut self) { + unsafe { + llvm::LLVMRustDisposeTargetMachine(self.tm); + } + } } /// Additional resources used by optimize_and_codegen (not module specific) @@ -372,13 +406,17 @@ unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_vo unsafe fn optimize_and_codegen(cgcx: &CodegenContext, diag_handler: &Handler, mtrans: ModuleTranslation, - mllvm: ModuleLlvm, config: ModuleConfig, output_names: OutputFilenames) - -> Result<(), FatalError> + -> Result { - let llmod = mllvm.llmod; - let llcx = mllvm.llcx; + let (llmod, llcx) = match mtrans.source { + ModuleSource::Translated(ref llvm) => (llvm.llmod, llvm.llcx), + ModuleSource::Preexisting(_) => { + 
bug!("optimize_and_codegen: called with ModuleSource::Preexisting") + } + }; + let tm = config.tm; let fv = HandlerFreeVars { @@ -390,7 +428,8 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, fv); llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, fv); - let module_name = Some(&mtrans.name[..]); + let module_name = mtrans.name.clone(); + let module_name = Some(&module_name[..]); if config.emit_no_opt_bc { let out = output_names.temp_path_ext("no-opt.bc", module_name); @@ -606,30 +645,13 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, } } - llvm::LLVMRustDisposeTargetMachine(tm); - Ok(()) + Ok(mtrans.into_compiled_module(config.emit_obj, config.emit_bc)) } - -pub fn cleanup_llvm(trans: &CrateTranslation) { - for module in trans.modules.iter() { - unsafe { - match module.source { - ModuleSource::Translated(llvm) => { - llvm::LLVMDisposeModule(llvm.llmod); - llvm::LLVMContextDispose(llvm.llcx); - } - ModuleSource::Preexisting(_) => { - } - } - } - } -} - -pub struct RunLLVMPassesResult { - pub modules: Vec, - pub metadata_module: ModuleTranslation, - pub allocator_module: Option, +pub struct CompiledModules { + pub modules: Vec, + pub metadata_module: CompiledModule, + pub allocator_module: Option, } pub fn run_passes(sess: &Session, @@ -658,13 +680,11 @@ pub fn run_passes(sess: &Session, !sess.opts.output_types.should_trans() || sess.opts.debugging_opts.no_trans); - let tm = create_target_machine(sess); - // Figure out what we actually need to build. 
- let mut modules_config = ModuleConfig::new(tm, sess.opts.cg.passes.clone()); - let mut metadata_config = ModuleConfig::new(tm, vec![]); - let mut allocator_config = ModuleConfig::new(tm, vec![]); + let mut modules_config = ModuleConfig::new(sess, sess.opts.cg.passes.clone()); + let mut metadata_config = ModuleConfig::new(sess, vec![]); + let mut allocator_config = ModuleConfig::new(sess, vec![]); if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { match *sanitizer { @@ -747,25 +767,22 @@ pub fn run_passes(sess: &Session, let mut work_items = Vec::with_capacity(1 + modules.len()); { - let work = build_work_item(sess, - metadata_module.clone(), - metadata_config.clone(), + let work = build_work_item(metadata_module, + metadata_config.clone(sess), crate_output.clone()); work_items.push(work); } - if let Some(allocator) = allocator_module.clone() { - let work = build_work_item(sess, - allocator, - allocator_config.clone(), + if let Some(allocator) = allocator_module { + let work = build_work_item(allocator, + allocator_config.clone(sess), crate_output.clone()); work_items.push(work); } - for mtrans in modules.iter() { - let work = build_work_item(sess, - mtrans.clone(), - modules_config.clone(), + for mtrans in modules { + let work = build_work_item(mtrans, + modules_config.clone(sess), crate_output.clone()); work_items.push(work); } @@ -778,6 +795,10 @@ pub fn run_passes(sess: &Session, Client::new(num_workers).expect("failed to create jobserver") }); + drop(modules_config); + drop(metadata_config); + drop(allocator_config); + let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); let (trans_worker_send, trans_worker_receive) = channel(); let (coordinator_send, coordinator_receive) = channel(); @@ -813,37 +834,27 @@ pub fn run_passes(sess: &Session, } } - match coordinator_thread.join() { - Ok(()) => {}, - Err(err) => { - panic!("error: {:?}", err); - } - } + let compiled_modules = coordinator_thread.join().unwrap(); // Just in case, check 
this on the way out. shared_emitter_main.check(sess); sess.diagnostic().abort_if_errors(); // If in incr. comp. mode, preserve the `.o` files for potential re-use - for mtrans in modules.iter() { + for module in compiled_modules.modules.iter() { let mut files = vec![]; - if modules_config.emit_obj { - let path = crate_output.temp_path(OutputType::Object, Some(&mtrans.name)); + if module.emit_obj { + let path = crate_output.temp_path(OutputType::Object, Some(&module.name)); files.push((OutputType::Object, path)); } - if modules_config.emit_bc { - let path = crate_output.temp_path(OutputType::Bitcode, Some(&mtrans.name)); + if module.emit_bc { + let path = crate_output.temp_path(OutputType::Bitcode, Some(&module.name)); files.push((OutputType::Bitcode, path)); } - save_trans_partition(sess, &mtrans.name, mtrans.symbol_name_hash, &files); - } - - // All codegen is finished. - unsafe { - llvm::LLVMRustDisposeTargetMachine(tm); + save_trans_partition(sess, &module.name, module.symbol_name_hash, &files); } let mut user_wants_bitcode = false; @@ -858,10 +869,10 @@ pub fn run_passes(sess: &Session, let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| { - if modules.len() == 1 { + if compiled_modules.modules.len() == 1 { // 1) Only one codegen unit. In this case it's no difficulty // to copy `foo.0.x` to `foo.x`. 
- let module_name = Some(&modules[0].name[..]); + let module_name = Some(&compiled_modules.modules[0].name[..]); let path = crate_output.temp_path(output_type, module_name); copy_gracefully(&path, &crate_output.path(output_type)); @@ -962,27 +973,30 @@ pub fn run_passes(sess: &Session, let keep_numbered_objects = needs_crate_object || (user_wants_objects && sess.opts.cg.codegen_units > 1); - for module_name in modules.iter().map(|m| Some(&m.name[..])) { - if modules_config.emit_obj && !keep_numbered_objects { + for module in compiled_modules.modules.iter() { + let module_name = Some(&module.name[..]); + + if module.emit_obj && !keep_numbered_objects { let path = crate_output.temp_path(OutputType::Object, module_name); remove(sess, &path); } - if modules_config.emit_bc && !keep_numbered_bitcode { + if module.emit_bc && !keep_numbered_bitcode { let path = crate_output.temp_path(OutputType::Bitcode, module_name); remove(sess, &path); } } - if metadata_config.emit_bc && !user_wants_bitcode { + if compiled_modules.metadata_module.emit_bc && !user_wants_bitcode { let path = crate_output.temp_path(OutputType::Bitcode, - Some(&metadata_module.name)); + Some(&compiled_modules.metadata_module.name)); remove(sess, &path); } - if allocator_config.emit_bc && !user_wants_bitcode { - if let Some(ref module) = allocator_module { + + if let Some(ref allocator_module) = compiled_modules.allocator_module { + if allocator_module.emit_bc && !user_wants_bitcode { let path = crate_output.temp_path(OutputType::Bitcode, - Some(&module.name)); + Some(&allocator_module.name)); remove(sess, &path); } } @@ -1000,21 +1014,14 @@ pub fn run_passes(sess: &Session, unsafe { llvm::LLVMRustPrintPassTimings(); } } - *trans.result.borrow_mut() = Some( - RunLLVMPassesResult { - modules, - metadata_module, - allocator_module, - } - ); + *trans.result.borrow_mut() = Some(compiled_modules); } pub fn dump_incremental_data(trans: &CrateTranslation) { let mut reuse = 0; for mtrans in trans.modules.iter() { - 
match mtrans.source { - ModuleSource::Preexisting(..) => reuse += 1, - ModuleSource::Translated(..) => (), + if mtrans.pre_existing { + reuse += 1; } } eprintln!("incremental: re-using {} out of {} modules", reuse, trans.modules.len()); @@ -1032,14 +1039,11 @@ impl fmt::Debug for WorkItem { } } -fn build_work_item(sess: &Session, - mtrans: ModuleTranslation, +fn build_work_item(mtrans: ModuleTranslation, config: ModuleConfig, output_names: OutputFilenames) -> WorkItem { - let mut config = config; - config.tm = create_target_machine(sess); WorkItem { mtrans: mtrans, config: config, @@ -1048,54 +1052,65 @@ fn build_work_item(sess: &Session, } fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) - -> Result<(), FatalError> + -> Result { let diag_handler = cgcx.create_diag_handler(); - unsafe { - match work_item.mtrans.source { - ModuleSource::Translated(mllvm) => { - debug!("llvm-optimizing {:?}", work_item.mtrans.name); - optimize_and_codegen(cgcx, - &diag_handler, - work_item.mtrans, - mllvm, - work_item.config, - work_item.output_names)?; - } - ModuleSource::Preexisting(wp) => { - let incr_comp_session_dir = cgcx.incr_comp_session_dir - .as_ref() - .unwrap(); - let name = &work_item.mtrans.name; - for (kind, saved_file) in wp.saved_files { - let obj_out = work_item.output_names.temp_path(kind, Some(name)); - let source_file = in_incr_comp_dir(&incr_comp_session_dir, - &saved_file); - debug!("copying pre-existing module `{}` from {:?} to {}", - work_item.mtrans.name, - source_file, - obj_out.display()); - match link_or_copy(&source_file, &obj_out) { - Ok(_) => { } - Err(err) => { - diag_handler.err(&format!("unable to copy {} to {}: {}", - source_file.display(), - obj_out.display(), - err)); - } - } + let module_name = work_item.mtrans.name.clone(); + + let pre_existing = match work_item.mtrans.source { + ModuleSource::Translated(_) => None, + ModuleSource::Preexisting(ref wp) => Some(wp.clone()), + }; + + if let Some(wp) = pre_existing { + let 
incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); + let name = &work_item.mtrans.name; + for (kind, saved_file) in wp.saved_files { + let obj_out = work_item.output_names.temp_path(kind, Some(name)); + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); + debug!("copying pre-existing module `{}` from {:?} to {}", + work_item.mtrans.name, + source_file, + obj_out.display()); + match link_or_copy(&source_file, &obj_out) { + Ok(_) => { } + Err(err) => { + diag_handler.err(&format!("unable to copy {} to {}: {}", + source_file.display(), + obj_out.display(), + err)); } } } - } - Ok(()) + Ok(CompiledModule { + name: module_name, + kind: ModuleKind::Regular, + pre_existing: true, + symbol_name_hash: work_item.mtrans.symbol_name_hash, + emit_bc: work_item.config.emit_bc, + emit_obj: work_item.config.emit_obj, + }) + } else { + debug!("llvm-optimizing {:?}", module_name); + + unsafe { + optimize_and_codegen(cgcx, + &diag_handler, + work_item.mtrans, + work_item.config, + work_item.output_names) + } + } } #[derive(Debug)] pub enum Message { Token(io::Result), - Done { success: bool }, + Done { result: Result }, WorkItem(WorkItem), CheckErrorMessages, } @@ -1115,7 +1130,7 @@ fn start_executing_work(sess: &Session, coordinator_receive: Receiver, jobserver: Client, exported_symbols: Arc) - -> thread::JoinHandle<()> { + -> thread::JoinHandle { // First up, convert our jobserver into a helper thread so we can use normal // mpsc channels to manage our messages and such. Once we've got the helper // thread then request `n-1` tokens because all of our work items are ready @@ -1215,6 +1230,10 @@ fn start_executing_work(sess: &Session, // the jobserver. 
thread::spawn(move || { + let mut compiled_modules = vec![]; + let mut compiled_metadata_module = None; + let mut compiled_allocator_module = None; + let mut work_items_left = total_work_item_count; let mut work_items = Vec::with_capacity(total_work_item_count); let mut tokens = Vec::new(); @@ -1253,7 +1272,8 @@ fn start_executing_work(sess: &Session, } else { shared_emitter.fatal("failed to acquire jobserver token"); drop(trans_worker_send.send(Message::CheckErrorMessages)); - return + // Exit the coordinator thread + panic!() } } @@ -1269,21 +1289,42 @@ fn start_executing_work(sess: &Session, // // Note that if the thread failed that means it panicked, so we // abort immediately. - Message::Done { success: true } => { + Message::Done { result: Ok(compiled_module) } => { drop(tokens.pop()); running -= 1; drop(trans_worker_send.send(Message::CheckErrorMessages)); + + match compiled_module.kind { + ModuleKind::Regular => { + compiled_modules.push(compiled_module); + } + ModuleKind::Metadata => { + assert!(compiled_metadata_module.is_none()); + compiled_metadata_module = Some(compiled_module); + } + ModuleKind::Allocator => { + assert!(compiled_allocator_module.is_none()); + compiled_allocator_module = Some(compiled_module); + } + } } - Message::Done { success: false } => { + Message::Done { result: Err(()) } => { shared_emitter.fatal("aborting due to worker thread panic"); drop(trans_worker_send.send(Message::CheckErrorMessages)); - return + // Exit the coordinator thread + panic!() } msg @ Message::CheckErrorMessages => { bug!("unexpected message: {:?}", msg); } } } + + CompiledModules { + modules: compiled_modules, + metadata_module: compiled_metadata_module.unwrap(), + allocator_module: compiled_allocator_module, + } }) } @@ -1297,17 +1338,22 @@ fn spawn_work(cgcx: CodegenContext, work: WorkItem) { // we exit. 
struct Bomb { coordinator_send: Sender, - success: bool, + result: Option, } impl Drop for Bomb { fn drop(&mut self) { - drop(self.coordinator_send.send(Message::Done { success: self.success })); + let result = match self.result.take() { + Some(compiled_module) => Ok(compiled_module), + None => Err(()) + }; + + drop(self.coordinator_send.send(Message::Done { result })); } } let mut bomb = Bomb { coordinator_send: cgcx.coordinator_send.clone(), - success: false, + result: None, }; // Execute the work itself, and if it finishes successfully then flag @@ -1323,8 +1369,7 @@ fn spawn_work(cgcx: CodegenContext, work: WorkItem) { // we just ignore the result and then send off our message saying that // we're done, which if `execute_work_item` failed is unlikely to be // seen by the main thread, but hey we might as well try anyway. - drop(execute_work_item(&cgcx, work).is_err()); - bomb.success = true; + bomb.result = Some(execute_work_item(&cgcx, work).unwrap()); }); } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 5582079c78f1..7ece92ef9dd6 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -27,6 +27,7 @@ use super::OngoingCrateTranslation; use super::ModuleLlvm; use super::ModuleSource; use super::ModuleTranslation; +use super::ModuleKind; use assert_module_sources; use back::link; @@ -952,6 +953,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, llcx: metadata_llcx, llmod: metadata_llmod, }), + kind: ModuleKind::Metadata, }; let no_builtins = attr::contains_name(&krate.attrs, "no_builtins"); @@ -961,7 +963,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, !tcx.sess.opts.output_types.should_trans() { let empty_exported_symbols = ExportedSymbols::empty(); let linker_info = LinkerInfo::new(&shared_ccx, &empty_exported_symbols); - return OngoingCrateTranslation { + let crate_translation = OngoingCrateTranslation { crate_name: tcx.crate_name(LOCAL_CRATE), link: link_meta, metadata: metadata, @@ 
-970,12 +972,18 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, linker_info: linker_info, windows_subsystem: None, no_integrated_as: false, - result: ::std::cell::RefCell::new(Some(::back::write::RunLLVMPassesResult { - modules: vec![], - metadata_module: metadata_module, - allocator_module: None, - })), + result: ::std::cell::RefCell::new(None), }; + + ::back::write::run_passes(tcx.sess, + &crate_translation, + vec![], + metadata_module, + None, + &output_filenames.outputs, + output_filenames); + + return crate_translation; } let exported_symbols = Arc::new(ExportedSymbols::compute(tcx, @@ -1047,7 +1055,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let module = ModuleTranslation { name: cgu_name, symbol_name_hash, - source: ModuleSource::Preexisting(buf.clone()) + source: ModuleSource::Preexisting(buf.clone()), + kind: ModuleKind::Regular, }; return (Stats::default(), module); } @@ -1108,7 +1117,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, source: ModuleSource::Translated(ModuleLlvm { llcx: ccx.llcx(), llmod: ccx.llmod(), - }) + }), + kind: ModuleKind::Regular, } }; @@ -1196,6 +1206,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, name: link::ALLOCATOR_MODULE_NAME.to_string(), symbol_name_hash: 0, // we always rebuild allocator shims source: ModuleSource::Translated(modules), + kind: ModuleKind::Allocator, }) } }); diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index e7debe3919b4..1df1bd272fd8 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -135,7 +135,6 @@ mod type_; mod type_of; mod value; -#[derive(Clone)] pub struct ModuleTranslation { /// The name of the module. 
When the crate may be saved between /// compilations, incremental compilation requires that name be @@ -145,6 +144,58 @@ pub struct ModuleTranslation { pub name: String, pub symbol_name_hash: u64, pub source: ModuleSource, + pub kind: ModuleKind, +} + +#[derive(Copy, Clone, Debug)] +pub enum ModuleKind { + Regular, + Metadata, + Allocator, +} + +impl ModuleTranslation { + pub fn into_compiled_module(self, emit_obj: bool, emit_bc: bool) -> CompiledModule { + let pre_existing = match self.source { + ModuleSource::Preexisting(_) => true, + ModuleSource::Translated(_) => false, + }; + + CompiledModule { + name: self.name.clone(), + kind: self.kind, + symbol_name_hash: self.symbol_name_hash, + pre_existing, + emit_obj, + emit_bc, + } + } +} + +impl Drop for ModuleTranslation { + fn drop(&mut self) { + match self.source { + ModuleSource::Preexisting(_) => { + // Nothing to dispose. + }, + ModuleSource::Translated(llvm) => { + unsafe { + llvm::LLVMDisposeModule(llvm.llmod); + llvm::LLVMContextDispose(llvm.llcx); + } + }, + } + } +} + +#[derive(Debug)] +pub struct CompiledModule { + pub name: String, + pub kind: ModuleKind, + pub symbol_name_hash: u64, + pub pre_existing: bool, + pub emit_obj: bool, + pub emit_bc: bool, } #[derive(Clone)] @@ -156,7 +207,7 @@ pub enum ModuleSource { Translated(ModuleLlvm), } -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Debug)] pub struct ModuleLlvm { pub llcx: llvm::ContextRef, pub llmod: llvm::ModuleRef, @@ -167,9 +218,9 @@ unsafe impl Sync for ModuleTranslation { } pub struct CrateTranslation { pub crate_name: Symbol, - pub modules: Vec, - pub metadata_module: ModuleTranslation, - pub allocator_module: Option, + pub modules: Vec, + pub metadata_module: CompiledModule, + pub allocator_module: Option, pub link: rustc::middle::cstore::LinkMeta, pub metadata: rustc::middle::cstore::EncodedMetadata, pub exported_symbols: Arc, @@ -189,7 +240,7 @@ pub struct OngoingCrateTranslation { pub no_integrated_as: bool, // This will be replaced by a 
Future. - pub result: ::std::cell::RefCell>, + pub result: ::std::cell::RefCell>, } impl OngoingCrateTranslation { From ccb970be4c28490a02ce8564e7d0bd00601ad322 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 10:27:24 +0200 Subject: [PATCH 087/213] async-llvm(9): Move OngoingCrateTranslation into back::write. --- src/librustc_driver/driver.rs | 4 +- src/librustc_trans/back/write.rs | 87 +++++++++++++++++++++++++++++--- src/librustc_trans/base.rs | 2 +- src/librustc_trans/lib.rs | 63 ----------------------- 4 files changed, 83 insertions(+), 73 deletions(-) diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index ff9f666af75d..44c046131f1b 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -1053,7 +1053,7 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, analysis: ty::CrateAnalysis, incremental_hashes_map: &IncrementalHashesMap, output_filenames: &OutputFilenames) - -> trans::OngoingCrateTranslation { + -> write::OngoingCrateTranslation { let time_passes = tcx.sess.time_passes(); time(time_passes, @@ -1071,7 +1071,7 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, /// Run LLVM itself, producing a bitcode file, assembly file or object file /// as a side effect. 
pub fn phase_5_run_llvm_passes(sess: &Session, - trans: trans::OngoingCrateTranslation, + trans: write::OngoingCrateTranslation, outputs: &OutputFilenames) -> (CompileResult, trans::CrateTranslation) { let trans = trans.join(sess, outputs); diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 83f7f574493e..987d88c7c613 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -10,23 +10,25 @@ use back::lto; use back::link::{self, get_linker, remove}; +use back::linker::LinkerInfo; use back::symbol_export::ExportedSymbols; use rustc_incremental::{save_trans_partition, in_incr_comp_dir}; +use rustc::middle::cstore::{LinkMeta, EncodedMetadata}; use rustc::session::config::{self, OutputFilenames, OutputType, OutputTypes, Passes, SomePasses, AllPasses, Sanitizer}; use rustc::session::Session; use llvm; use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef}; use llvm::SMDiagnosticRef; -use {CrateTranslation, OngoingCrateTranslation, ModuleSource, ModuleTranslation, - CompiledModule, ModuleKind}; +use {CrateTranslation, ModuleSource, ModuleTranslation, CompiledModule, ModuleKind}; use rustc::hir::def_id::CrateNum; use rustc::util::common::{time, time_depth, set_time_depth, path2cstr}; -use rustc::util::fs::link_or_copy; +use rustc::util::fs::{link_or_copy, rename_or_copy_remove}; use errors::{self, Handler, Level, DiagnosticBuilder, FatalError}; use errors::emitter::{Emitter}; use syntax::ext::hygiene::Mark; use syntax_pos::MultiSpan; +use syntax_pos::symbol::Symbol; use context::{is_pie_binary, get_reloc_model}; use jobserver::{Client, Acquired}; use rustc_demangle; @@ -816,7 +818,7 @@ pub fn run_passes(sess: &Session, } loop { - shared_emitter_main.check(sess); + shared_emitter_main.check(sess, false); match trans_worker_receive.recv() { Err(_) => { @@ -837,7 +839,7 @@ pub fn run_passes(sess: &Session, let compiled_modules = coordinator_thread.join().unwrap(); // Just in case, check this 
on the way out. - shared_emitter_main.check(sess); + shared_emitter_main.check(sess, false); sess.diagnostic().abort_if_errors(); // If in incr. comp. mode, preserve the `.o` files for potential re-use @@ -1516,9 +1518,21 @@ impl Emitter for SharedEmitter { } impl SharedEmitterMain { - pub fn check(&self, sess: &Session) { + pub fn check(&self, sess: &Session, blocking: bool) { loop { - match self.receiver.try_recv() { + let message = if blocking { + match self.receiver.recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + } else { + match self.receiver.try_recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + }; + + match message { Ok(SharedEmitterMessage::Diagnostic(diag)) => { let handler = sess.diagnostic(); match diag.code { @@ -1555,3 +1569,62 @@ impl SharedEmitterMain { } } } + +pub struct OngoingCrateTranslation { + pub crate_name: Symbol, + pub link: LinkMeta, + pub metadata: EncodedMetadata, + pub exported_symbols: Arc, + pub no_builtins: bool, + pub windows_subsystem: Option, + pub linker_info: LinkerInfo, + pub no_integrated_as: bool, + + // This will be replaced by a Future. + pub result: ::std::cell::RefCell>, +} + +impl OngoingCrateTranslation { + pub fn join(self, + sess: &Session, + outputs: &OutputFilenames) + -> CrateTranslation { + + let result = self.result.borrow_mut().take().unwrap(); + + let trans = CrateTranslation { + crate_name: self.crate_name, + link: self.link, + metadata: self.metadata, + exported_symbols: self.exported_symbols, + no_builtins: self.no_builtins, + windows_subsystem: self.windows_subsystem, + linker_info: self.linker_info, + + modules: result.modules, + metadata_module: result.metadata_module, + allocator_module: result.allocator_module, + }; + + if self.no_integrated_as { + run_assembler(sess, outputs); + + // HACK the linker expects the object file to be named foo.0.o but + // `run_assembler` produces an object named just foo.o. 
Rename it if we + // are going to build an executable + if sess.opts.output_types.contains_key(&OutputType::Exe) { + let f = outputs.path(OutputType::Object); + rename_or_copy_remove(&f, + f.with_file_name(format!("{}.0.o", + f.file_stem().unwrap().to_string_lossy()))).unwrap(); + } + + // Remove assembly source, unless --save-temps was specified + if !sess.opts.cg.save_temps { + fs::remove_file(&outputs.temp_path(OutputType::Assembly, None)).unwrap(); + } + } + + trans + } +} diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 7ece92ef9dd6..6eb38dc52eec 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -23,7 +23,6 @@ //! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int, //! int) and rec(x=int, y=int, z=int) will have the same TypeRef. -use super::OngoingCrateTranslation; use super::ModuleLlvm; use super::ModuleSource; use super::ModuleTranslation; @@ -33,6 +32,7 @@ use assert_module_sources; use back::link; use back::linker::LinkerInfo; use back::symbol_export::{self, ExportedSymbols}; +use back::write::OngoingCrateTranslation; use llvm::{ContextRef, Linkage, ModuleRef, ValueRef, Vector, get_param}; use llvm; use metadata; diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 1df1bd272fd8..62ff1535be95 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -35,11 +35,7 @@ #![feature(conservative_impl_trait)] use rustc::dep_graph::WorkProduct; -use rustc::session::Session; -use rustc::session::config::{OutputType, OutputFilenames}; -use rustc::util::fs::rename_or_copy_remove; use syntax_pos::symbol::Symbol; -use std::fs; use std::sync::Arc; extern crate flate2; @@ -229,63 +225,4 @@ pub struct CrateTranslation { pub linker_info: back::linker::LinkerInfo } -pub struct OngoingCrateTranslation { - pub crate_name: Symbol, - pub link: rustc::middle::cstore::LinkMeta, - pub metadata: rustc::middle::cstore::EncodedMetadata, - pub exported_symbols: Arc, - pub 
no_builtins: bool, - pub windows_subsystem: Option, - pub linker_info: back::linker::LinkerInfo, - pub no_integrated_as: bool, - - // This will be replaced by a Future. - pub result: ::std::cell::RefCell>, -} - -impl OngoingCrateTranslation { - pub fn join(self, - sess: &Session, - outputs: &OutputFilenames) - -> CrateTranslation { - - let result = self.result.borrow_mut().take().unwrap(); - - let trans = CrateTranslation { - crate_name: self.crate_name, - link: self.link, - metadata: self.metadata, - exported_symbols: self.exported_symbols, - no_builtins: self.no_builtins, - windows_subsystem: self.windows_subsystem, - linker_info: self.linker_info, - - modules: result.modules, - metadata_module: result.metadata_module, - allocator_module: result.allocator_module, - }; - - if self.no_integrated_as { - back::write::run_assembler(sess, outputs); - - // HACK the linker expects the object file to be named foo.0.o but - // `run_assembler` produces an object named just foo.o. Rename it if we - // are going to build an executable - if sess.opts.output_types.contains_key(&OutputType::Exe) { - let f = outputs.path(OutputType::Object); - rename_or_copy_remove(&f, - f.with_file_name(format!("{}.0.o", - f.file_stem().unwrap().to_string_lossy()))).unwrap(); - } - - // Remove assembly source, unless --save-temps was specified - if !sess.opts.cg.save_temps { - fs::remove_file(&outputs.temp_path(OutputType::Assembly, None)).unwrap(); - } - } - - trans - } -} - __build_diagnostic_array! { librustc_trans, DIAGNOSTICS } From 28589ec3e474a7cce15f761d6bcd24f80aebdee1 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 10:48:54 +0200 Subject: [PATCH 088/213] async-llvm(10): Factor compile output files cleanup into separate functions. 
--- src/librustc_trans/back/write.rs | 190 +++++++++++++++++-------------- 1 file changed, 103 insertions(+), 87 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 987d88c7c613..bae50da3209d 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -656,6 +656,11 @@ pub struct CompiledModules { pub allocator_module: Option, } +fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { + sess.crate_types.borrow().contains(&config::CrateTypeRlib) && + sess.opts.output_types.contains_key(&OutputType::Exe) +} + pub fn run_passes(sess: &Session, trans: &OngoingCrateTranslation, modules: Vec, @@ -723,12 +728,7 @@ pub fn run_passes(sess: &Session, // Emit bitcode files for the crate if we're emitting an rlib. // Whenever an rlib is created, the bitcode is inserted into the // archive in order to allow LTO against it. - let needs_crate_bitcode = - sess.crate_types.borrow().contains(&config::CrateTypeRlib) && - sess.opts.output_types.contains_key(&OutputType::Exe); - let needs_crate_object = - sess.opts.output_types.contains_key(&OutputType::Exe); - if needs_crate_bitcode { + if need_crate_bitcode_for_rlib(sess) { modules_config.emit_bc = true; } @@ -842,7 +842,26 @@ pub fn run_passes(sess: &Session, shared_emitter_main.check(sess, false); sess.diagnostic().abort_if_errors(); - // If in incr. comp. mode, preserve the `.o` files for potential re-use + copy_module_artifacts_into_incr_comp_cache(sess, &compiled_modules, crate_output); + + produce_final_output_artifacts(sess, &compiled_modules, crate_output); + + // FIXME: time_llvm_passes support - does this use a global context or + // something? 
+ if sess.opts.cg.codegen_units == 1 && sess.time_llvm_passes() { + unsafe { llvm::LLVMRustPrintPassTimings(); } + } + + *trans.result.borrow_mut() = Some(compiled_modules); +} + +fn copy_module_artifacts_into_incr_comp_cache(sess: &Session, + compiled_modules: &CompiledModules, + crate_output: &OutputFilenames) { + if sess.opts.incremental.is_none() { + return; + } + for module in compiled_modules.modules.iter() { let mut files = vec![]; @@ -858,86 +877,88 @@ pub fn run_passes(sess: &Session, save_trans_partition(sess, &module.name, module.symbol_name_hash, &files); } +} +fn produce_final_output_artifacts(sess: &Session, + compiled_modules: &CompiledModules, + crate_output: &OutputFilenames) { let mut user_wants_bitcode = false; let mut user_wants_objects = false; - { - // Produce final compile outputs. - let copy_gracefully = |from: &Path, to: &Path| { - if let Err(e) = fs::copy(from, to) { - sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); - } - }; - let copy_if_one_unit = |output_type: OutputType, - keep_numbered: bool| { - if compiled_modules.modules.len() == 1 { - // 1) Only one codegen unit. In this case it's no difficulty - // to copy `foo.0.x` to `foo.x`. - let module_name = Some(&compiled_modules.modules[0].name[..]); - let path = crate_output.temp_path(output_type, module_name); - copy_gracefully(&path, - &crate_output.path(output_type)); - if !sess.opts.cg.save_temps && !keep_numbered { - // The user just wants `foo.x`, not `foo.#module-name#.x`. - remove(sess, &path); - } + // Produce final compile outputs. + let copy_gracefully = |from: &Path, to: &Path| { + if let Err(e) = fs::copy(from, to) { + sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); + } + }; + + let copy_if_one_unit = |output_type: OutputType, + keep_numbered: bool| { + if compiled_modules.modules.len() == 1 { + // 1) Only one codegen unit. In this case it's no difficulty + // to copy `foo.0.x` to `foo.x`. 
+ let module_name = Some(&compiled_modules.modules[0].name[..]); + let path = crate_output.temp_path(output_type, module_name); + copy_gracefully(&path, + &crate_output.path(output_type)); + if !sess.opts.cg.save_temps && !keep_numbered { + // The user just wants `foo.x`, not `foo.#module-name#.x`. + remove(sess, &path); + } + } else { + let ext = crate_output.temp_path(output_type, None) + .extension() + .unwrap() + .to_str() + .unwrap() + .to_owned(); + + if crate_output.outputs.contains_key(&output_type) { + // 2) Multiple codegen units, with `--emit foo=some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring emit path because multiple .{} files \ + were produced", ext)); + } else if crate_output.single_output_file.is_some() { + // 3) Multiple codegen units, with `-o some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring -o because multiple .{} files \ + were produced", ext)); } else { - let ext = crate_output.temp_path(output_type, None) - .extension() - .unwrap() - .to_str() - .unwrap() - .to_owned(); - - if crate_output.outputs.contains_key(&output_type) { - // 2) Multiple codegen units, with `--emit foo=some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring emit path because multiple .{} files \ - were produced", ext)); - } else if crate_output.single_output_file.is_some() { - // 3) Multiple codegen units, with `-o some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring -o because multiple .{} files \ - were produced", ext)); - } else { - // 4) Multiple codegen units, but no explicit name. We - // just leave the `foo.0.x` files in place. - // (We don't have to do any work in this case.) - } - } - }; - - // Flag to indicate whether the user explicitly requested bitcode. 
- // Otherwise, we produced it only as a temporary output, and will need - // to get rid of it. - for output_type in output_types.keys() { - match *output_type { - OutputType::Bitcode => { - user_wants_bitcode = true; - // Copy to .bc, but always keep the .0.bc. There is a later - // check to figure out if we should delete .0.bc files, or keep - // them for making an rlib. - copy_if_one_unit(OutputType::Bitcode, true); - } - OutputType::LlvmAssembly => { - copy_if_one_unit(OutputType::LlvmAssembly, false); - } - OutputType::Assembly => { - copy_if_one_unit(OutputType::Assembly, false); - } - OutputType::Object => { - user_wants_objects = true; - copy_if_one_unit(OutputType::Object, true); - } - OutputType::Mir | - OutputType::Metadata | - OutputType::Exe | - OutputType::DepInfo => {} + // 4) Multiple codegen units, but no explicit name. We + // just leave the `foo.0.x` files in place. + // (We don't have to do any work in this case.) } } + }; + + // Flag to indicate whether the user explicitly requested bitcode. + // Otherwise, we produced it only as a temporary output, and will need + // to get rid of it. + for output_type in crate_output.outputs.keys() { + match *output_type { + OutputType::Bitcode => { + user_wants_bitcode = true; + // Copy to .bc, but always keep the .0.bc. There is a later + // check to figure out if we should delete .0.bc files, or keep + // them for making an rlib. + copy_if_one_unit(OutputType::Bitcode, true); + } + OutputType::LlvmAssembly => { + copy_if_one_unit(OutputType::LlvmAssembly, false); + } + OutputType::Assembly => { + copy_if_one_unit(OutputType::Assembly, false); + } + OutputType::Object => { + user_wants_objects = true; + copy_if_one_unit(OutputType::Object, true); + } + OutputType::Mir | + OutputType::Metadata | + OutputType::Exe | + OutputType::DepInfo => {} + } } - let user_wants_bitcode = user_wants_bitcode; // Clean up unwanted temporary files. 
@@ -969,6 +990,9 @@ pub fn run_passes(sess: &Session, // If you change how this works, also update back::link::link_rlib, // where .#module-name#.bc files are (maybe) deleted after making an // rlib. + let needs_crate_bitcode = need_crate_bitcode_for_rlib(sess); + let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); + let keep_numbered_bitcode = needs_crate_bitcode || (user_wants_bitcode && sess.opts.cg.codegen_units > 1); @@ -1009,14 +1033,6 @@ pub fn run_passes(sess: &Session, // - #crate#.crate.metadata.o // - #crate#.bc // These are used in linking steps and will be cleaned up afterward. - - // FIXME: time_llvm_passes support - does this use a global context or - // something? - if sess.opts.cg.codegen_units == 1 && sess.time_llvm_passes() { - unsafe { llvm::LLVMRustPrintPassTimings(); } - } - - *trans.result.borrow_mut() = Some(compiled_modules); } pub fn dump_incremental_data(trans: &CrateTranslation) { From f3ce50558f6ba3f42011833a36a43f8026bf4863 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 11:41:34 +0200 Subject: [PATCH 089/213] async-llvm(11): Delay joining ongoing translation until right before linking. 
--- src/librustc_trans/back/write.rs | 103 ++++++++++++++----------------- src/librustc_trans/base.rs | 99 ++++++++++++++--------------- 2 files changed, 95 insertions(+), 107 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index bae50da3209d..c33d65e3e536 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -259,10 +259,10 @@ impl ModuleConfig { } } - fn set_flags(&mut self, sess: &Session, trans: &OngoingCrateTranslation) { + fn set_flags(&mut self, sess: &Session, no_builtins: bool) { self.no_verify = sess.no_verify(); self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; - self.no_builtins = trans.no_builtins; + self.no_builtins = no_builtins; self.time_passes = sess.time_passes(); self.inline_threshold = sess.opts.cg.inline_threshold; self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode; @@ -662,12 +662,21 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { } pub fn run_passes(sess: &Session, - trans: &OngoingCrateTranslation, modules: Vec, metadata_module: ModuleTranslation, allocator_module: Option, - output_types: &OutputTypes, - crate_output: &OutputFilenames) { + output_types_override: &OutputTypes, + crate_output: &OutputFilenames, + + crate_name: Symbol, + link: LinkMeta, + metadata: EncodedMetadata, + exported_symbols: Arc, + no_builtins: bool, + windows_subsystem: Option, + linker_info: LinkerInfo, + no_integrated_as: bool) + -> OngoingCrateTranslation { // It's possible that we have `codegen_units > 1` but only one item in // `trans.modules`. 
We could theoretically proceed and do LTO in that // case, but it would be confusing to have the validity of @@ -732,7 +741,7 @@ pub fn run_passes(sess: &Session, modules_config.emit_bc = true; } - for output_type in output_types.keys() { + for output_type in output_types_override.keys() { match *output_type { OutputType::Bitcode => { modules_config.emit_bc = true; } OutputType::LlvmAssembly => { modules_config.emit_ir = true; } @@ -758,9 +767,9 @@ pub fn run_passes(sess: &Session, } } - modules_config.set_flags(sess, trans); - metadata_config.set_flags(sess, trans); - allocator_config.set_flags(sess, trans); + modules_config.set_flags(sess, no_builtins); + metadata_config.set_flags(sess, no_builtins); + allocator_config.set_flags(sess, no_builtins); // Populate a buffer with a list of codegen threads. Items are processed in @@ -797,12 +806,8 @@ pub fn run_passes(sess: &Session, Client::new(num_workers).expect("failed to create jobserver") }); - drop(modules_config); - drop(metadata_config); - drop(allocator_config); - let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); - let (trans_worker_send, trans_worker_receive) = channel(); + let (trans_worker_send, _trans_worker_receive) = channel(); let (coordinator_send, coordinator_receive) = channel(); let coordinator_thread = start_executing_work(sess, @@ -812,47 +817,24 @@ pub fn run_passes(sess: &Session, coordinator_send.clone(), coordinator_receive, client, - trans.exported_symbols.clone()); + exported_symbols.clone()); for work_item in work_items { coordinator_send.send(Message::WorkItem(work_item)).unwrap(); } - loop { - shared_emitter_main.check(sess, false); + OngoingCrateTranslation { + crate_name, + link, + metadata, + exported_symbols, + no_builtins, + windows_subsystem, + linker_info, + no_integrated_as, - match trans_worker_receive.recv() { - Err(_) => { - // An `Err` here means that all senders for this channel have - // been closed. 
This could happen because all work has - // completed successfully or there has been some error. - // At this point we don't care which it is. - break - } - - Ok(Message::CheckErrorMessages) => continue, - Ok(msg) => { - bug!("unexpected message {:?}", msg); - } - } + shared_emitter_main, + future: coordinator_thread } - - let compiled_modules = coordinator_thread.join().unwrap(); - - // Just in case, check this on the way out. - shared_emitter_main.check(sess, false); - sess.diagnostic().abort_if_errors(); - - copy_module_artifacts_into_incr_comp_cache(sess, &compiled_modules, crate_output); - - produce_final_output_artifacts(sess, &compiled_modules, crate_output); - - // FIXME: time_llvm_passes support - does this use a global context or - // something? - if sess.opts.cg.codegen_units == 1 && sess.time_llvm_passes() { - unsafe { llvm::LLVMRustPrintPassTimings(); } - } - - *trans.result.borrow_mut() = Some(compiled_modules); } fn copy_module_artifacts_into_incr_comp_cache(sess: &Session, @@ -1596,8 +1578,8 @@ pub struct OngoingCrateTranslation { pub linker_info: LinkerInfo, pub no_integrated_as: bool, - // This will be replaced by a Future. - pub result: ::std::cell::RefCell>, + shared_emitter_main: SharedEmitterMain, + future: thread::JoinHandle, } impl OngoingCrateTranslation { @@ -1605,8 +1587,19 @@ impl OngoingCrateTranslation { sess: &Session, outputs: &OutputFilenames) -> CrateTranslation { + self.shared_emitter_main.check(sess, true); + let compiled_modules = self.future.join().unwrap(); - let result = self.result.borrow_mut().take().unwrap(); + sess.abort_if_errors(); + + copy_module_artifacts_into_incr_comp_cache(sess, &compiled_modules, outputs); + produce_final_output_artifacts(sess, &compiled_modules, outputs); + + // FIXME: time_llvm_passes support - does this use a global context or + // something? 
+ if sess.opts.cg.codegen_units == 1 && sess.time_llvm_passes() { + unsafe { llvm::LLVMRustPrintPassTimings(); } + } let trans = CrateTranslation { crate_name: self.crate_name, @@ -1617,9 +1610,9 @@ impl OngoingCrateTranslation { windows_subsystem: self.windows_subsystem, linker_info: self.linker_info, - modules: result.modules, - metadata_module: result.metadata_module, - allocator_module: result.allocator_module, + modules: compiled_modules.modules, + metadata_module: compiled_modules.metadata_module, + allocator_module: compiled_modules.allocator_module, }; if self.no_integrated_as { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 6eb38dc52eec..65041e60fe39 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -32,7 +32,7 @@ use assert_module_sources; use back::link; use back::linker::LinkerInfo; use back::symbol_export::{self, ExportedSymbols}; -use back::write::OngoingCrateTranslation; +use back::write::{self, OngoingCrateTranslation}; use llvm::{ContextRef, Linkage, ModuleRef, ValueRef, Vector, get_param}; use llvm; use metadata; @@ -963,27 +963,21 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, !tcx.sess.opts.output_types.should_trans() { let empty_exported_symbols = ExportedSymbols::empty(); let linker_info = LinkerInfo::new(&shared_ccx, &empty_exported_symbols); - let crate_translation = OngoingCrateTranslation { - crate_name: tcx.crate_name(LOCAL_CRATE), - link: link_meta, - metadata: metadata, - exported_symbols: Arc::new(empty_exported_symbols), - no_builtins: no_builtins, - linker_info: linker_info, - windows_subsystem: None, - no_integrated_as: false, - result: ::std::cell::RefCell::new(None), - }; + return write::run_passes(tcx.sess, + vec![], + metadata_module, + None, + &output_filenames.outputs, + output_filenames, - ::back::write::run_passes(tcx.sess, - &crate_translation, - vec![], - metadata_module, - None, - &output_filenames.outputs, - output_filenames); - - return 
crate_translation; + tcx.crate_name(LOCAL_CRATE), + link_meta, + metadata, + Arc::new(empty_exported_symbols), + no_builtins, + None, + linker_info, + false); } let exported_symbols = Arc::new(ExportedSymbols::compute(tcx, @@ -1231,19 +1225,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, (outputs.outputs.contains_key(&OutputType::Object) || outputs.outputs.contains_key(&OutputType::Exe))); - let crate_translation = OngoingCrateTranslation { - crate_name: tcx.crate_name(LOCAL_CRATE), - link: link_meta, - metadata: metadata, - exported_symbols, - no_builtins, - linker_info, - windows_subsystem, - no_integrated_as, - - result: ::std::cell::RefCell::new(None), - }; - time(sess.time_passes(), "assert dep graph", || rustc_incremental::assert_dep_graph(tcx)); @@ -1252,34 +1233,48 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "serialize dep graph", || rustc_incremental::save_dep_graph(tcx, incremental_hashes_map, - &crate_translation.metadata.hashes, - crate_translation.link.crate_hash)); + &metadata.hashes, + link_meta.crate_hash)); // --- if no_integrated_as { let output_types = OutputTypes::new(&[(OutputType::Assembly, None)]); time(sess.time_passes(), "LLVM passes", - || ::back::write::run_passes(sess, - &crate_translation, - modules, - metadata_module, - allocator_module, - &output_types, - outputs)) + || write::run_passes(sess, + modules, + metadata_module, + allocator_module, + &output_types, + outputs, + + tcx.crate_name(LOCAL_CRATE), + link_meta, + metadata, + exported_symbols, + no_builtins, + windows_subsystem, + linker_info, + no_integrated_as)) } else { time(sess.time_passes(), "LLVM passes", - || ::back::write::run_passes(sess, - &crate_translation, - modules, - metadata_module, - allocator_module, - &sess.opts.output_types, - outputs)) - }; + || write::run_passes(sess, + modules, + metadata_module, + allocator_module, + &sess.opts.output_types, + outputs, - crate_translation + tcx.crate_name(LOCAL_CRATE), + link_meta, + 
metadata, + exported_symbols, + no_builtins, + windows_subsystem, + linker_info, + no_integrated_as)) + } } #[inline(never)] // give this a place in the profiler From 397b2a800f7a25e81c2aaab2ac0291adbfdce3ce Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 11:50:45 +0200 Subject: [PATCH 090/213] async-llvm(12): Hide no_integrated_as logic in write::run_passes. --- src/librustc_trans/back/write.rs | 7 +++- src/librustc_trans/base.rs | 55 ++++++++++---------------------- 2 files changed, 22 insertions(+), 40 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index c33d65e3e536..280951e0dc87 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -665,7 +665,6 @@ pub fn run_passes(sess: &Session, modules: Vec, metadata_module: ModuleTranslation, allocator_module: Option, - output_types_override: &OutputTypes, crate_output: &OutputFilenames, crate_name: Symbol, @@ -690,6 +689,12 @@ pub fn run_passes(sess: &Session, sess.fatal("can't perform LTO when using multiple codegen units"); } + let output_types_override = if no_integrated_as { + OutputTypes::new(&[(OutputType::Assembly, None)]) + } else { + sess.opts.output_types.clone() + }; + // Sanity check assert!(modules.len() == sess.opts.cg.codegen_units || sess.opts.debugging_opts.incremental.is_some() || diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 65041e60fe39..53fb330e364d 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -44,7 +44,7 @@ use rustc::dep_graph::AssertDepGraphSafe; use rustc::middle::cstore::LinkMeta; use rustc::hir::map as hir_map; use rustc::util::common::time; -use rustc::session::config::{self, NoDebugInfo, OutputFilenames, OutputType, OutputTypes}; +use rustc::session::config::{self, NoDebugInfo, OutputFilenames, OutputType}; use rustc::session::Session; use rustc_incremental::{self, IncrementalHashesMap}; use abi; @@ -967,7 +967,6 @@ pub fn 
trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, vec![], metadata_module, None, - &output_filenames.outputs, output_filenames, tcx.crate_name(LOCAL_CRATE), @@ -1237,44 +1236,22 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta.crate_hash)); // --- - if no_integrated_as { - let output_types = OutputTypes::new(&[(OutputType::Assembly, None)]); - time(sess.time_passes(), - "LLVM passes", - || write::run_passes(sess, - modules, - metadata_module, - allocator_module, - &output_types, - outputs, + time(sess.time_passes(), + "LLVM passes", + || write::run_passes(sess, + modules, + metadata_module, + allocator_module, + outputs, - tcx.crate_name(LOCAL_CRATE), - link_meta, - metadata, - exported_symbols, - no_builtins, - windows_subsystem, - linker_info, - no_integrated_as)) - } else { - time(sess.time_passes(), - "LLVM passes", - || write::run_passes(sess, - modules, - metadata_module, - allocator_module, - &sess.opts.output_types, - outputs, - - tcx.crate_name(LOCAL_CRATE), - link_meta, - metadata, - exported_symbols, - no_builtins, - windows_subsystem, - linker_info, - no_integrated_as)) - } + tcx.crate_name(LOCAL_CRATE), + link_meta, + metadata, + exported_symbols, + no_builtins, + windows_subsystem, + linker_info, + no_integrated_as)) } #[inline(never)] // give this a place in the profiler From b924ec1484bfca00c42a8aff68d77c41d4cd1ea6 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 12:35:23 +0200 Subject: [PATCH 091/213] async-llvm(13): Submit LLVM work packages from base::trans_crate(). 
--- src/librustc_driver/driver.rs | 7 +-- src/librustc_trans/back/write.rs | 97 +++++++++++++++----------------- src/librustc_trans/base.rs | 69 ++++++++++++++--------- 3 files changed, 88 insertions(+), 85 deletions(-) diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 44c046131f1b..ba4a6c0d67dd 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -229,7 +229,7 @@ pub fn compile_input(sess: &Session, sess.code_stats.borrow().print_type_sizes(); } - let (phase5_result, trans) = phase_5_run_llvm_passes(sess, trans, &outputs); + let (phase5_result, trans) = phase_5_run_llvm_passes(sess, trans); controller_entry_point!(after_llvm, sess, @@ -1071,10 +1071,9 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, /// Run LLVM itself, producing a bitcode file, assembly file or object file /// as a side effect. pub fn phase_5_run_llvm_passes(sess: &Session, - trans: write::OngoingCrateTranslation, - outputs: &OutputFilenames) + trans: write::OngoingCrateTranslation) -> (CompileResult, trans::CrateTranslation) { - let trans = trans.join(sess, outputs); + let trans = trans.join(sess); if sess.opts.debugging_opts.incremental_info { write::dump_incremental_data(&trans); diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 280951e0dc87..c1c85394698c 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -662,11 +662,8 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { } pub fn run_passes(sess: &Session, - modules: Vec, - metadata_module: ModuleTranslation, - allocator_module: Option, crate_output: &OutputFilenames, - + total_work_item_count: usize, crate_name: Symbol, link: LinkMeta, metadata: EncodedMetadata, @@ -695,12 +692,6 @@ pub fn run_passes(sess: &Session, sess.opts.output_types.clone() }; - // Sanity check - assert!(modules.len() == sess.opts.cg.codegen_units || - sess.opts.debugging_opts.incremental.is_some() || - 
!sess.opts.output_types.should_trans() || - sess.opts.debugging_opts.no_trans); - // Figure out what we actually need to build. let mut modules_config = ModuleConfig::new(sess, sess.opts.cg.passes.clone()); @@ -776,38 +767,11 @@ pub fn run_passes(sess: &Session, metadata_config.set_flags(sess, no_builtins); allocator_config.set_flags(sess, no_builtins); - - // Populate a buffer with a list of codegen threads. Items are processed in - // LIFO order, just because it's a tiny bit simpler that way. (The order - // doesn't actually matter.) - let mut work_items = Vec::with_capacity(1 + modules.len()); - - { - let work = build_work_item(metadata_module, - metadata_config.clone(sess), - crate_output.clone()); - work_items.push(work); - } - - if let Some(allocator) = allocator_module { - let work = build_work_item(allocator, - allocator_config.clone(sess), - crate_output.clone()); - work_items.push(work); - } - - for mtrans in modules { - let work = build_work_item(mtrans, - modules_config.clone(sess), - crate_output.clone()); - work_items.push(work); - } - let client = sess.jobserver_from_env.clone().unwrap_or_else(|| { // Pick a "reasonable maximum" if we don't otherwise have a jobserver in // our environment, capping out at 32 so we don't take everything down // by hogging the process run queue. 
- let num_workers = cmp::min(work_items.len() - 1, 32); + let num_workers = cmp::min(total_work_item_count - 1, 32); Client::new(num_workers).expect("failed to create jobserver") }); @@ -816,16 +780,13 @@ pub fn run_passes(sess: &Session, let (coordinator_send, coordinator_receive) = channel(); let coordinator_thread = start_executing_work(sess, - work_items.len(), + total_work_item_count, shared_emitter, trans_worker_send, coordinator_send.clone(), coordinator_receive, client, exported_symbols.clone()); - for work_item in work_items { - coordinator_send.send(Message::WorkItem(work_item)).unwrap(); - } OngoingCrateTranslation { crate_name, @@ -837,6 +798,12 @@ pub fn run_passes(sess: &Session, linker_info, no_integrated_as, + regular_module_config: modules_config, + metadata_module_config: metadata_config, + allocator_module_config: allocator_config, + + output_filenames: crate_output.clone(), + coordinator_send, shared_emitter_main, future: coordinator_thread } @@ -1583,22 +1550,29 @@ pub struct OngoingCrateTranslation { pub linker_info: LinkerInfo, pub no_integrated_as: bool, + output_filenames: OutputFilenames, + regular_module_config: ModuleConfig, + metadata_module_config: ModuleConfig, + allocator_module_config: ModuleConfig, + + coordinator_send: Sender, shared_emitter_main: SharedEmitterMain, future: thread::JoinHandle, } impl OngoingCrateTranslation { - pub fn join(self, - sess: &Session, - outputs: &OutputFilenames) - -> CrateTranslation { + pub fn join(self, sess: &Session) -> CrateTranslation { self.shared_emitter_main.check(sess, true); let compiled_modules = self.future.join().unwrap(); sess.abort_if_errors(); - copy_module_artifacts_into_incr_comp_cache(sess, &compiled_modules, outputs); - produce_final_output_artifacts(sess, &compiled_modules, outputs); + copy_module_artifacts_into_incr_comp_cache(sess, + &compiled_modules, + &self.output_filenames); + produce_final_output_artifacts(sess, + &compiled_modules, + &self.output_filenames); // FIXME: 
time_llvm_passes support - does this use a global context or // something? @@ -1621,24 +1595,41 @@ impl OngoingCrateTranslation { }; if self.no_integrated_as { - run_assembler(sess, outputs); + run_assembler(sess, &self.output_filenames); // HACK the linker expects the object file to be named foo.0.o but // `run_assembler` produces an object named just foo.o. Rename it if we // are going to build an executable if sess.opts.output_types.contains_key(&OutputType::Exe) { - let f = outputs.path(OutputType::Object); + let f = self.output_filenames.path(OutputType::Object); rename_or_copy_remove(&f, - f.with_file_name(format!("{}.0.o", - f.file_stem().unwrap().to_string_lossy()))).unwrap(); + f.with_file_name(format!("{}.0.o", + f.file_stem().unwrap().to_string_lossy()))).unwrap(); } // Remove assembly source, unless --save-temps was specified if !sess.opts.cg.save_temps { - fs::remove_file(&outputs.temp_path(OutputType::Assembly, None)).unwrap(); + fs::remove_file(&self.output_filenames + .temp_path(OutputType::Assembly, None)).unwrap(); } } trans } + + pub fn submit_translated_module_to_llvm(&self, + sess: &Session, + mtrans: ModuleTranslation) { + let module_config = match mtrans.kind { + ModuleKind::Regular => self.regular_module_config.clone(sess), + ModuleKind::Metadata => self.metadata_module_config.clone(sess), + ModuleKind::Allocator => self.allocator_module_config.clone(sess), + }; + + let work_item = build_work_item(mtrans, + module_config, + self.output_filenames.clone()); + + drop(self.coordinator_send.send(Message::WorkItem(work_item))); + } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 53fb330e364d..144c1efd23b7 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -963,20 +963,22 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, !tcx.sess.opts.output_types.should_trans() { let empty_exported_symbols = ExportedSymbols::empty(); let linker_info = LinkerInfo::new(&shared_ccx, 
&empty_exported_symbols); - return write::run_passes(tcx.sess, - vec![], - metadata_module, - None, - output_filenames, + let ongoing_translation = write::run_passes( + tcx.sess, + output_filenames, + 1, + tcx.crate_name(LOCAL_CRATE), + link_meta, + metadata, + Arc::new(empty_exported_symbols), + no_builtins, + None, + linker_info, + false); - tcx.crate_name(LOCAL_CRATE), - link_meta, - metadata, - Arc::new(empty_exported_symbols), - no_builtins, - None, - linker_info, - false); + ongoing_translation.submit_translated_module_to_llvm(tcx.sess, metadata_module); + + return ongoing_translation; } let exported_symbols = Arc::new(ExportedSymbols::compute(tcx, @@ -1236,22 +1238,33 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta.crate_hash)); // --- - time(sess.time_passes(), - "LLVM passes", - || write::run_passes(sess, - modules, - metadata_module, - allocator_module, - outputs, + let total_module_count = modules.len() + 1 + + if allocator_module.is_some() { 1 } else { 0 }; - tcx.crate_name(LOCAL_CRATE), - link_meta, - metadata, - exported_symbols, - no_builtins, - windows_subsystem, - linker_info, - no_integrated_as)) + let ongoing_translation = write::run_passes( + sess, + outputs, + total_module_count, + tcx.crate_name(LOCAL_CRATE), + link_meta, + metadata, + exported_symbols, + no_builtins, + windows_subsystem, + linker_info, + no_integrated_as); + + ongoing_translation.submit_translated_module_to_llvm(sess, metadata_module); + + for mtrans in modules { + ongoing_translation.submit_translated_module_to_llvm(sess, mtrans); + } + + if let Some(allocator_module) = allocator_module { + ongoing_translation.submit_translated_module_to_llvm(sess, allocator_module); + } + + ongoing_translation } #[inline(never)] // give this a place in the profiler From a1be65845c1a48c724961b136cf98b4d4b5e972d Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 14:14:40 +0200 Subject: [PATCH 092/213] async-llvm(14): Move LTO/codegen-unit conflict 
check to beginning of compilation process. --- src/librustc/session/config.rs | 17 +++++++++++++++++ src/librustc_trans/back/write.rs | 13 ------------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 8b55eb4c099a..8c4cc20deb7b 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -1498,6 +1498,23 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) early_error(error_format, "Value for codegen units must be a positive nonzero integer"); } + // It's possible that we have `codegen_units > 1` but only one item in + // `trans.modules`. We could theoretically proceed and do LTO in that + // case, but it would be confusing to have the validity of + // `-Z lto -C codegen-units=2` depend on details of the crate being + // compiled, so we complain regardless. + if cg.lto && cg.codegen_units > 1 { + // This case is impossible to handle because LTO expects to be able + // to combine the entire crate and all its dependencies into a + // single compilation unit, but each codegen unit is in a separate + // LLVM context, so they can't easily be combined. + early_error(error_format, "can't perform LTO when using multiple codegen units"); + } + + if cg.lto && debugging_opts.incremental.is_some() { + early_error(error_format, "can't perform LTO when compiling incrementally"); + } + let mut prints = Vec::::new(); if cg.target_cpu.as_ref().map_or(false, |s| s == "help") { prints.push(PrintRequest::TargetCPUs); diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index c1c85394698c..967b7d0eb62c 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -673,19 +673,6 @@ pub fn run_passes(sess: &Session, linker_info: LinkerInfo, no_integrated_as: bool) -> OngoingCrateTranslation { - // It's possible that we have `codegen_units > 1` but only one item in - // `trans.modules`. 
We could theoretically proceed and do LTO in that - // case, but it would be confusing to have the validity of - // `-Z lto -C codegen-units=2` depend on details of the crate being - // compiled, so we complain regardless. - if sess.lto() && sess.opts.cg.codegen_units > 1 { - // This case is impossible to handle because LTO expects to be able - // to combine the entire crate and all its dependencies into a - // single compilation unit, but each codegen unit is in a separate - // LLVM context, so they can't easily be combined. - sess.fatal("can't perform LTO when using multiple codegen units"); - } - let output_types_override = if no_integrated_as { OutputTypes::new(&[(OutputType::Assembly, None)]) } else { From 943a5bdf35a27239013b0aced68588034d366d0f Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 14:18:11 +0200 Subject: [PATCH 093/213] async-llvm(15): Don't require number of codegen units upfront. --- src/librustc_trans/back/write.rs | 48 +++++++++++++++----------------- src/librustc_trans/base.rs | 9 ++---- 2 files changed, 25 insertions(+), 32 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 967b7d0eb62c..118853b87123 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -33,7 +33,6 @@ use context::{is_pie_binary, get_reloc_model}; use jobserver::{Client, Acquired}; use rustc_demangle; -use std::cmp; use std::ffi::CString; use std::fmt; use std::fs; @@ -663,7 +662,6 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { pub fn run_passes(sess: &Session, crate_output: &OutputFilenames, - total_work_item_count: usize, crate_name: Symbol, link: LinkMeta, metadata: EncodedMetadata, @@ -758,8 +756,7 @@ pub fn run_passes(sess: &Session, // Pick a "reasonable maximum" if we don't otherwise have a jobserver in // our environment, capping out at 32 so we don't take everything down // by hogging the process run queue. 
- let num_workers = cmp::min(total_work_item_count - 1, 32); - Client::new(num_workers).expect("failed to create jobserver") + Client::new(32).expect("failed to create jobserver") }); let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); @@ -767,14 +764,12 @@ pub fn run_passes(sess: &Session, let (coordinator_send, coordinator_receive) = channel(); let coordinator_thread = start_executing_work(sess, - total_work_item_count, shared_emitter, trans_worker_send, coordinator_send.clone(), coordinator_receive, client, exported_symbols.clone()); - OngoingCrateTranslation { crate_name, link, @@ -1072,6 +1067,7 @@ pub enum Message { Done { result: Result }, WorkItem(WorkItem), CheckErrorMessages, + TranslationDone, } @@ -1082,7 +1078,6 @@ pub struct Diagnostic { } fn start_executing_work(sess: &Session, - total_work_item_count: usize, shared_emitter: SharedEmitter, trans_worker_send: Sender, coordinator_send: Sender, @@ -1104,9 +1099,6 @@ fn start_executing_work(sess: &Session, let helper = jobserver.into_helper_thread(move |token| { drop(coordinator_send2.send(Message::Token(token))); }).expect("failed to spawn helper thread"); - for _ in 0..total_work_item_count - 1 { - helper.request_token(); - } let mut each_linked_rlib_for_lto = Vec::new(); drop(link::each_linked_rlib(sess, &mut |cnum, path| { @@ -1193,29 +1185,25 @@ fn start_executing_work(sess: &Session, let mut compiled_metadata_module = None; let mut compiled_allocator_module = None; - let mut work_items_left = total_work_item_count; - let mut work_items = Vec::with_capacity(total_work_item_count); + let mut translation_done = false; + let mut work_items = Vec::new(); let mut tokens = Vec::new(); let mut running = 0; - while work_items_left > 0 || running > 0 { + while !translation_done || work_items.len() > 0 || running > 0 { // Spin up what work we can, only doing this while we've got available // parallelism slots and work left to spawn. 
- while work_items_left > 0 && running < tokens.len() + 1 { - if let Some(item) = work_items.pop() { - work_items_left -= 1; - let worker_index = work_items_left; + while work_items.len() > 0 && running < tokens.len() + 1 { + let item = work_items.pop().unwrap(); + let worker_index = work_items.len(); - let cgcx = CodegenContext { - worker: worker_index, - .. cgcx.clone() - }; + let cgcx = CodegenContext { + worker: worker_index, + .. cgcx.clone() + }; - spawn_work(cgcx, item); - running += 1; - } else { - break - } + spawn_work(cgcx, item); + running += 1; } // Relinquish accidentally acquired extra tokens @@ -1238,6 +1226,7 @@ fn start_executing_work(sess: &Session, Message::WorkItem(work_item) => { work_items.push(work_item); + helper.request_token(); } // If a thread exits successfully then we drop a token associated @@ -1273,6 +1262,9 @@ fn start_executing_work(sess: &Session, // Exit the coordinator thread panic!() } + Message::TranslationDone => { + translation_done = true; + } msg @ Message::CheckErrorMessages => { bug!("unexpected message: {:?}", msg); } @@ -1619,4 +1611,8 @@ impl OngoingCrateTranslation { drop(self.coordinator_send.send(Message::WorkItem(work_item))); } + + pub fn signal_translation_done(&self) { + drop(self.coordinator_send.send(Message::TranslationDone)); + } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 144c1efd23b7..c22bc617baaa 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -966,7 +966,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let ongoing_translation = write::run_passes( tcx.sess, output_filenames, - 1, tcx.crate_name(LOCAL_CRATE), link_meta, metadata, @@ -977,6 +976,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, false); ongoing_translation.submit_translated_module_to_llvm(tcx.sess, metadata_module); + ongoing_translation.signal_translation_done(); return ongoing_translation; } @@ -1237,14 +1237,9 @@ pub fn trans_crate<'a, 'tcx>(tcx: 
TyCtxt<'a, 'tcx, 'tcx>, &metadata.hashes, link_meta.crate_hash)); // --- - - let total_module_count = modules.len() + 1 + - if allocator_module.is_some() { 1 } else { 0 }; - let ongoing_translation = write::run_passes( sess, outputs, - total_module_count, tcx.crate_name(LOCAL_CRATE), link_meta, metadata, @@ -1264,6 +1259,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ongoing_translation.submit_translated_module_to_llvm(sess, allocator_module); } + ongoing_translation.signal_translation_done(); + ongoing_translation } From 0ad9eaa998d597bfa9597c4d6c751cfb66ed2e7e Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 14:21:01 +0200 Subject: [PATCH 094/213] async-llvm(16): Inject allocator shim into LLVM module immediately if necessary. --- src/librustc_trans/base.rs | 39 ++++++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index c22bc617baaa..6088b8e479b6 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -989,6 +989,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let (translation_items, codegen_units) = collect_and_partition_translation_items(&shared_ccx, &exported_symbols); + assert!(codegen_units.len() <= 1 || !tcx.sess.lto()); + let translation_items = Arc::new(translation_items); let mut all_stats = Stats::default(); @@ -1106,13 +1108,27 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, debuginfo::finalize(&ccx); } + let llvm_module = ModuleLlvm { + llcx: ccx.llcx(), + llmod: ccx.llmod(), + }; + + // In LTO mode we inject the allocator shim into the existing + // module. 
+ if ccx.sess().lto() { + if let Some(kind) = ccx.sess().allocator_kind.get() { + time(ccx.sess().time_passes(), "write allocator module", || { + unsafe { + allocator::trans(ccx.tcx(), &llvm_module, kind); + } + }); + } + } + ModuleTranslation { name: cgu_name, symbol_name_hash, - source: ModuleSource::Translated(ModuleLlvm { - llcx: ccx.llcx(), - llmod: ccx.llmod(), - }), + source: ModuleSource::Translated(llvm_module), kind: ModuleKind::Regular, } }; @@ -1180,13 +1196,10 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // links in an object file that has allocator functions. When we're // compiling a final LTO artifact, though, there's no need to worry about // this as we're not working with this dual "rlib/dylib" functionality. - let allocator_module = tcx.sess.allocator_kind.get().and_then(|kind| unsafe { - if sess.lto() && llvm_modules.len() > 0 { - time(tcx.sess.time_passes(), "write allocator module", || { - allocator::trans(tcx, &llvm_modules[0], kind) - }); - None - } else { + let allocator_module = if tcx.sess.lto() { + None + } else if let Some(kind) = tcx.sess.allocator_kind.get() { + unsafe { let (llcx, llmod) = context::create_context_and_module(tcx.sess, "allocator"); let modules = ModuleLlvm { @@ -1204,7 +1217,9 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, kind: ModuleKind::Allocator, }) } - }); + } else { + None + }; let linker_info = LinkerInfo::new(&shared_ccx, &exported_symbols); From e7d0fa340f904829abf28907c7f1add11a65389e Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 14:29:13 +0200 Subject: [PATCH 095/213] async-llvm(17): Create MSVC __imp_ symbols immediately for each module. 
--- src/librustc_trans/base.rs | 64 ++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 6088b8e479b6..9e6fe5ab4349 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -804,7 +804,7 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, // code references on its own. // See #26591, #27438 fn create_imps(sess: &Session, - llvm_modules: &[ModuleLlvm]) { + llvm_module: &ModuleLlvm) { // The x86 ABI seems to require that leading underscores are added to symbol // names, so we need an extra underscore on 32-bit. There's also a leading // '\x01' here which disables LLVM's symbol mangling (e.g. no extra @@ -815,28 +815,26 @@ fn create_imps(sess: &Session, "\x01__imp_" }; unsafe { - for ll in llvm_modules { - let exported: Vec<_> = iter_globals(ll.llmod) - .filter(|&val| { - llvm::LLVMRustGetLinkage(val) == - llvm::Linkage::ExternalLinkage && - llvm::LLVMIsDeclaration(val) == 0 - }) - .collect(); + let exported: Vec<_> = iter_globals(llvm_module.llmod) + .filter(|&val| { + llvm::LLVMRustGetLinkage(val) == + llvm::Linkage::ExternalLinkage && + llvm::LLVMIsDeclaration(val) == 0 + }) + .collect(); - let i8p_ty = Type::i8p_llcx(ll.llcx); - for val in exported { - let name = CStr::from_ptr(llvm::LLVMGetValueName(val)); - let mut imp_name = prefix.as_bytes().to_vec(); - imp_name.extend(name.to_bytes()); - let imp_name = CString::new(imp_name).unwrap(); - let imp = llvm::LLVMAddGlobal(ll.llmod, - i8p_ty.to_ref(), - imp_name.as_ptr() as *const _); - let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref()); - llvm::LLVMSetInitializer(imp, init); - llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); - } + let i8p_ty = Type::i8p_llcx(llvm_module.llcx); + for val in exported { + let name = CStr::from_ptr(llvm::LLVMGetValueName(val)); + let mut imp_name = prefix.as_bytes().to_vec(); + imp_name.extend(name.to_bytes()); + let imp_name = 
CString::new(imp_name).unwrap(); + let imp = llvm::LLVMAddGlobal(llvm_module.llmod, + i8p_ty.to_ref(), + imp_name.as_ptr() as *const _); + let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref()); + llvm::LLVMSetInitializer(imp, init); + llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); } } } @@ -1125,6 +1123,12 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } + // Adjust exported symbols for MSVC dllimport + if ccx.sess().target.target.options.is_like_msvc && + ccx.sess().crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) { + create_imps(ccx.sess(), &llvm_module); + } + ModuleTranslation { name: cgu_name, symbol_name_hash, @@ -1170,22 +1174,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let sess = shared_ccx.sess(); - // Get the list of llvm modules we created. We'll do a few wacky - // transforms on them now. - - let llvm_modules: Vec<_> = - modules.iter() - .filter_map(|module| match module.source { - ModuleSource::Translated(llvm) => Some(llvm), - _ => None, - }) - .collect(); - - if sess.target.target.options.is_like_msvc && - sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) { - create_imps(sess, &llvm_modules); - } - // Translate an allocator shim, if any // // If LTO is enabled and we've got some previous LLVM module we translated From 7e09d1e1709d9228cc8e0deba834e6752354f107 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 15:02:53 +0200 Subject: [PATCH 096/213] async-llvm(18): Instantiate OngoingCrateTranslation before starting translation. 
--- src/librustc/middle/cstore.rs | 10 +- src/librustc_driver/driver.rs | 6 +- src/librustc_incremental/persist/save.rs | 4 +- src/librustc_metadata/cstore_impl.rs | 5 +- src/librustc_metadata/encoder.rs | 7 +- src/librustc_trans/base.rs | 127 +++++++++++++---------- 6 files changed, 84 insertions(+), 75 deletions(-) diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 48bddf2f7175..b1f4aa69adb9 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -50,7 +50,7 @@ pub use self::NativeLibraryKind::*; // lonely orphan structs and enums looking for a better home -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Copy)] pub struct LinkMeta { pub crate_hash: Svh, } @@ -161,15 +161,13 @@ pub struct ExternCrate { } pub struct EncodedMetadata { - pub raw_data: Vec, - pub hashes: EncodedMetadataHashes, + pub raw_data: Vec } impl EncodedMetadata { pub fn new() -> EncodedMetadata { EncodedMetadata { raw_data: Vec::new(), - hashes: EncodedMetadataHashes::new(), } } } @@ -294,7 +292,7 @@ pub trait CrateStore { tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, reachable: &NodeSet) - -> EncodedMetadata; + -> (EncodedMetadata, EncodedMetadataHashes); fn metadata_encoding_version(&self) -> &[u8]; } @@ -424,7 +422,7 @@ impl CrateStore for DummyCrateStore { tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, reachable: &NodeSet) - -> EncodedMetadata { + -> (EncodedMetadata, EncodedMetadataHashes) { bug!("encode_metadata") } fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") } diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index ba4a6c0d67dd..ee9d30b58fef 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -206,7 +206,7 @@ pub fn compile_input(sess: &Session, println!("Pre-trans"); tcx.print_debug_stats(); } - let trans = phase_4_translate_to_llvm(tcx, analysis, &incremental_hashes_map, + let trans = phase_4_translate_to_llvm(tcx, analysis, 
incremental_hashes_map, &outputs); if log_enabled!(::log::LogLevel::Info) { @@ -1051,7 +1051,7 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, /// be discarded. pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, analysis: ty::CrateAnalysis, - incremental_hashes_map: &IncrementalHashesMap, + incremental_hashes_map: IncrementalHashesMap, output_filenames: &OutputFilenames) -> write::OngoingCrateTranslation { let time_passes = tcx.sess.time_passes(); @@ -1063,7 +1063,7 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let translation = time(time_passes, "translation", - move || trans::trans_crate(tcx, analysis, &incremental_hashes_map, output_filenames)); + move || trans::trans_crate(tcx, analysis, incremental_hashes_map, output_filenames)); translation } diff --git a/src/librustc_incremental/persist/save.rs b/src/librustc_incremental/persist/save.rs index 1bdd4f851fb1..339e2bdc1573 100644 --- a/src/librustc_incremental/persist/save.rs +++ b/src/librustc_incremental/persist/save.rs @@ -34,7 +34,7 @@ use super::file_format; use super::work_product; pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: &IncrementalHashesMap, + incremental_hashes_map: IncrementalHashesMap, metadata_hashes: &EncodedMetadataHashes, svh: Svh) { debug!("save_dep_graph()"); @@ -51,7 +51,7 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, eprintln!("incremental: {} edges in dep-graph", query.graph.len_edges()); } - let mut hcx = HashContext::new(tcx, incremental_hashes_map); + let mut hcx = HashContext::new(tcx, &incremental_hashes_map); let preds = Predecessors::new(&query, &mut hcx); let mut current_metadata_hashes = FxHashMap(); diff --git a/src/librustc_metadata/cstore_impl.rs b/src/librustc_metadata/cstore_impl.rs index 25079613e586..e8b0dea1e8ac 100644 --- a/src/librustc_metadata/cstore_impl.rs +++ b/src/librustc_metadata/cstore_impl.rs @@ -15,7 +15,8 @@ use schema; use 
rustc::ty::maps::QueryConfig; use rustc::middle::cstore::{CrateStore, CrateSource, LibSource, DepKind, NativeLibrary, MetadataLoader, LinkMeta, - LinkagePreference, LoadedMacro, EncodedMetadata}; + LinkagePreference, LoadedMacro, EncodedMetadata, + EncodedMetadataHashes}; use rustc::hir::def; use rustc::middle::lang_items; use rustc::session::Session; @@ -443,7 +444,7 @@ impl CrateStore for cstore::CStore { tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, reachable: &NodeSet) - -> EncodedMetadata + -> (EncodedMetadata, EncodedMetadataHashes) { encoder::encode_metadata(tcx, link_meta, reachable) } diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index 5d73abc3ee8b..c35d8407c9d3 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -1638,7 +1638,7 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for ImplVisitor<'a, 'tcx> { pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, link_meta: &LinkMeta, exported_symbols: &NodeSet) - -> EncodedMetadata + -> (EncodedMetadata, EncodedMetadataHashes) { let mut cursor = Cursor::new(vec![]); cursor.write_all(METADATA_HEADER).unwrap(); @@ -1681,10 +1681,7 @@ pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, result[header + 2] = (pos >> 8) as u8; result[header + 3] = (pos >> 0) as u8; - EncodedMetadata { - raw_data: result, - hashes: metadata_hashes, - } + (EncodedMetadata { raw_data: result }, metadata_hashes) } pub fn get_repr_options<'a, 'tcx, 'gcx>(tcx: &TyCtxt<'a, 'tcx, 'gcx>, did: DefId) -> ReprOptions { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 9e6fe5ab4349..bd49ad955f19 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -38,7 +38,7 @@ use llvm; use metadata; use rustc::hir::def_id::LOCAL_CRATE; use rustc::middle::lang_items::StartFnLangItem; -use rustc::middle::cstore::EncodedMetadata; +use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::ty::{self, Ty, 
TyCtxt}; use rustc::dep_graph::AssertDepGraphSafe; use rustc::middle::cstore::LinkMeta; @@ -729,7 +729,8 @@ fn contains_null(s: &str) -> bool { fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, link_meta: &LinkMeta, exported_symbols: &NodeSet) - -> (ContextRef, ModuleRef, EncodedMetadata) { + -> (ContextRef, ModuleRef, + EncodedMetadata, EncodedMetadataHashes) { use std::io::Write; use flate2::Compression; use flate2::write::DeflateEncoder; @@ -759,15 +760,18 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, }).max().unwrap(); if kind == MetadataKind::None { - return (metadata_llcx, metadata_llmod, EncodedMetadata::new()); + return (metadata_llcx, + metadata_llmod, + EncodedMetadata::new(), + EncodedMetadataHashes::new()); } let cstore = &tcx.sess.cstore; - let metadata = cstore.encode_metadata(tcx, - &link_meta, - exported_symbols); + let (metadata, hashes) = cstore.encode_metadata(tcx, + &link_meta, + exported_symbols); if kind == MetadataKind::Uncompressed { - return (metadata_llcx, metadata_llmod, metadata); + return (metadata_llcx, metadata_llmod, metadata, hashes); } assert!(kind == MetadataKind::Compressed); @@ -795,7 +799,7 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, let directive = CString::new(directive).unwrap(); llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr()) } - return (metadata_llcx, metadata_llmod, metadata); + return (metadata_llcx, metadata_llmod, metadata, hashes); } // Create a `__imp_ = &symbol` global for every public static `symbol`. 
@@ -919,7 +923,7 @@ pub fn find_exported_symbols(tcx: TyCtxt, reachable: &NodeSet) -> NodeSet { pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, analysis: ty::CrateAnalysis, - incremental_hashes_map: &IncrementalHashesMap, + incremental_hashes_map: IncrementalHashesMap, output_filenames: &OutputFilenames) -> OngoingCrateTranslation { // Be careful with this krate: obviously it gives access to the @@ -927,19 +931,16 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // `TransCrate`, you need to be careful to register "reads" of the // particular items that will be processed. let krate = tcx.hir.krate(); - let ty::CrateAnalysis { reachable, .. } = analysis; - let check_overflow = tcx.sess.overflow_checks(); - - let link_meta = link::build_link_meta(incremental_hashes_map); - + let link_meta = link::build_link_meta(&incremental_hashes_map); let exported_symbol_node_ids = find_exported_symbols(tcx, &reachable); + let shared_ccx = SharedCrateContext::new(tcx, check_overflow, output_filenames); // Translate the metadata. 
- let (metadata_llcx, metadata_llmod, metadata) = + let (metadata_llcx, metadata_llmod, metadata, metadata_incr_hashes) = time(tcx.sess.time_passes(), "write metadata", || { write_metadata(tcx, &link_meta, &exported_symbol_node_ids) }); @@ -976,6 +977,11 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ongoing_translation.submit_translated_module_to_llvm(tcx.sess, metadata_module); ongoing_translation.signal_translation_done(); + assert_and_save_dep_graph(tcx, + incremental_hashes_map, + metadata_incr_hashes, + link_meta); + return ongoing_translation; } @@ -989,6 +995,35 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, assert!(codegen_units.len() <= 1 || !tcx.sess.lto()); + let linker_info = LinkerInfo::new(&shared_ccx, &exported_symbols); + let subsystem = attr::first_attr_value_str_by_name(&krate.attrs, + "windows_subsystem"); + let windows_subsystem = subsystem.map(|subsystem| { + if subsystem != "windows" && subsystem != "console" { + tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ + `windows` and `console` are allowed", + subsystem)); + } + subsystem.to_string() + }); + + let no_integrated_as = tcx.sess.opts.cg.no_integrated_as || + (tcx.sess.target.target.options.no_integrated_as && + (output_filenames.outputs.contains_key(&OutputType::Object) || + output_filenames.outputs.contains_key(&OutputType::Exe))); + + let ongoing_translation = write::run_passes( + tcx.sess, + output_filenames, + tcx.crate_name(LOCAL_CRATE), + link_meta, + metadata, + exported_symbols.clone(), + no_builtins, + windows_subsystem, + linker_info, + no_integrated_as); + let translation_items = Arc::new(translation_items); let mut all_stats = Stats::default(); @@ -1209,48 +1244,10 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, None }; - let linker_info = LinkerInfo::new(&shared_ccx, &exported_symbols); - - let subsystem = attr::first_attr_value_str_by_name(&krate.attrs, - "windows_subsystem"); - let windows_subsystem = 
subsystem.map(|subsystem| { - if subsystem != "windows" && subsystem != "console" { - tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ - `windows` and `console` are allowed", - subsystem)); - } - subsystem.to_string() - }); - - let outputs = output_filenames; - - let no_integrated_as = sess.opts.cg.no_integrated_as || - (sess.target.target.options.no_integrated_as && - (outputs.outputs.contains_key(&OutputType::Object) || - outputs.outputs.contains_key(&OutputType::Exe))); - - time(sess.time_passes(), - "assert dep graph", - || rustc_incremental::assert_dep_graph(tcx)); - - time(sess.time_passes(), - "serialize dep graph", - || rustc_incremental::save_dep_graph(tcx, - incremental_hashes_map, - &metadata.hashes, - link_meta.crate_hash)); - // --- - let ongoing_translation = write::run_passes( - sess, - outputs, - tcx.crate_name(LOCAL_CRATE), - link_meta, - metadata, - exported_symbols, - no_builtins, - windows_subsystem, - linker_info, - no_integrated_as); + assert_and_save_dep_graph(tcx, + incremental_hashes_map, + metadata_incr_hashes, + link_meta); ongoing_translation.submit_translated_module_to_llvm(sess, metadata_module); @@ -1267,6 +1264,22 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ongoing_translation } +fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: IncrementalHashesMap, + metadata_incr_hashes: EncodedMetadataHashes, + link_meta: LinkMeta) { + time(tcx.sess.time_passes(), + "assert dep graph", + || rustc_incremental::assert_dep_graph(tcx)); + + time(tcx.sess.time_passes(), + "serialize dep graph", + || rustc_incremental::save_dep_graph(tcx, + incremental_hashes_map, + &metadata_incr_hashes, + link_meta.crate_hash)); +} + #[inline(never)] // give this a place in the profiler fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trans_items: I) where I: Iterator> From 81b789fd879b8dcafe9d6acc06fdc71261c520fb Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: 
Wed, 26 Jul 2017 16:02:32 +0200 Subject: [PATCH 097/213] async-llvm(19): Already start LLVM while still translating. --- src/librustc_trans/assert_module_sources.rs | 36 ++++++------ src/librustc_trans/back/write.rs | 5 +- src/librustc_trans/base.rs | 64 ++++++++++----------- 3 files changed, 56 insertions(+), 49 deletions(-) diff --git a/src/librustc_trans/assert_module_sources.rs b/src/librustc_trans/assert_module_sources.rs index b5ef4aac34c8..6e661a5a8c6a 100644 --- a/src/librustc_trans/assert_module_sources.rs +++ b/src/librustc_trans/assert_module_sources.rs @@ -37,11 +37,22 @@ use rustc::ich::{ATTR_PARTITION_REUSED, ATTR_PARTITION_TRANSLATED}; const MODULE: &'static str = "module"; const CFG: &'static str = "cfg"; -#[derive(Debug, PartialEq)] -enum Disposition { Reused, Translated } +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum Disposition { Reused, Translated } + +impl ModuleTranslation { + pub fn disposition(&self) -> (String, Disposition) { + let disposition = match self.source { + ModuleSource::Preexisting(_) => Disposition::Reused, + ModuleSource::Translated(_) => Disposition::Translated, + }; + + (self.name.clone(), disposition) + } +} pub(crate) fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - modules: &[ModuleTranslation]) { + modules: &[(String, Disposition)]) { let _ignore = tcx.dep_graph.in_ignore(); if tcx.sess.opts.incremental.is_none() { @@ -56,7 +67,7 @@ pub(crate) fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, struct AssertModuleSource<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - modules: &'a [ModuleTranslation], + modules: &'a [(String, Disposition)], } impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { @@ -75,15 +86,15 @@ impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { } let mname = self.field(attr, MODULE); - let mtrans = self.modules.iter().find(|mtrans| *mtrans.name == *mname.as_str()); + let mtrans = self.modules.iter().find(|&&(ref name, _)| name == mname.as_str()); let mtrans = match mtrans { Some(m) 
=> m, None => { debug!("module name `{}` not found amongst:", mname); - for mtrans in self.modules { + for &(ref name, ref disposition) in self.modules { debug!("module named `{}` with disposition {:?}", - mtrans.name, - self.disposition(mtrans)); + name, + disposition); } self.tcx.sess.span_err( @@ -93,7 +104,7 @@ impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { } }; - let mtrans_disposition = self.disposition(mtrans); + let mtrans_disposition = mtrans.1; if disposition != mtrans_disposition { self.tcx.sess.span_err( attr.span, @@ -104,13 +115,6 @@ impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { } } - fn disposition(&self, mtrans: &ModuleTranslation) -> Disposition { - match mtrans.source { - ModuleSource::Preexisting(_) => Disposition::Reused, - ModuleSource::Translated(_) => Disposition::Translated, - } - } - fn field(&self, attr: &ast::Attribute, name: &str) -> ast::Name { for item in attr.meta_item_list().unwrap_or_else(Vec::new) { if item.check_name(name) { diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 118853b87123..6c12a4989f5b 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -678,7 +678,6 @@ pub fn run_passes(sess: &Session, }; // Figure out what we actually need to build. 
- let mut modules_config = ModuleConfig::new(sess, sess.opts.cg.passes.clone()); let mut metadata_config = ModuleConfig::new(sess, vec![]); let mut allocator_config = ModuleConfig::new(sess, vec![]); @@ -1615,4 +1614,8 @@ impl OngoingCrateTranslation { pub fn signal_translation_done(&self) { drop(self.coordinator_send.send(Message::TranslationDone)); } + + pub fn check_for_errors(&self, sess: &Session) { + self.shared_emitter_main.check(sess, false); + } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index bd49ad955f19..0137fa086932 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -1024,24 +1024,34 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, linker_info, no_integrated_as); + ongoing_translation.submit_translated_module_to_llvm(tcx.sess, metadata_module); + let translation_items = Arc::new(translation_items); let mut all_stats = Stats::default(); - let modules: Vec = codegen_units - .into_iter() - .map(|cgu| { - let dep_node = cgu.work_product_dep_node(); - let ((stats, module), _) = - tcx.dep_graph.with_task(dep_node, - AssertDepGraphSafe(&shared_ccx), - AssertDepGraphSafe((cgu, - translation_items.clone(), - exported_symbols.clone())), - module_translation); - all_stats.extend(stats); - module - }) - .collect(); + let mut module_dispositions = tcx.sess.opts.incremental.as_ref().map(|_| Vec::new()); + + for cgu in codegen_units.into_iter() { + ongoing_translation.check_for_errors(tcx.sess); + let dep_node = cgu.work_product_dep_node(); + let ((stats, module), _) = + tcx.dep_graph.with_task(dep_node, + AssertDepGraphSafe(&shared_ccx), + AssertDepGraphSafe((cgu, + translation_items.clone(), + exported_symbols.clone())), + module_translation); + all_stats.extend(stats); + + if let Some(ref mut module_dispositions) = module_dispositions { + module_dispositions.push(module.disposition()); + } + ongoing_translation.submit_translated_module_to_llvm(tcx.sess, module); + } + + if let 
Some(module_dispositions) = module_dispositions { + assert_module_sources::assert_module_sources(tcx, &module_dispositions); + } fn module_translation<'a, 'tcx>( scx: AssertDepGraphSafe<&SharedCrateContext<'a, 'tcx>>, @@ -1175,8 +1185,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, (lcx.into_stats(), module) } - assert_module_sources::assert_module_sources(tcx, &modules); - symbol_names_test::report_symbol_names(tcx); if shared_ccx.sess().trans_stats() { @@ -1207,8 +1215,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } - let sess = shared_ccx.sess(); - // Translate an allocator shim, if any // // If LTO is enabled and we've got some previous LLVM module we translated @@ -1244,23 +1250,17 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, None }; + if let Some(allocator_module) = allocator_module { + ongoing_translation.submit_translated_module_to_llvm(tcx.sess, allocator_module); + } + + ongoing_translation.check_for_errors(tcx.sess); + ongoing_translation.signal_translation_done(); + assert_and_save_dep_graph(tcx, incremental_hashes_map, metadata_incr_hashes, link_meta); - - ongoing_translation.submit_translated_module_to_llvm(sess, metadata_module); - - for mtrans in modules { - ongoing_translation.submit_translated_module_to_llvm(sess, mtrans); - } - - if let Some(allocator_module) = allocator_module { - ongoing_translation.submit_translated_module_to_llvm(sess, allocator_module); - } - - ongoing_translation.signal_translation_done(); - ongoing_translation } From ab3bc584c0412e31efd7b92ef77b7cebfa555926 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 26 Jul 2017 16:12:34 +0200 Subject: [PATCH 098/213] async-llvm(20): Do some cleanup. 
--- src/librustc_trans/back/write.rs | 45 ++++++++++++++++---------------- src/librustc_trans/base.rs | 4 +-- 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 6c12a4989f5b..f792da01cde8 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -341,7 +341,7 @@ pub struct CodegenContext { // compiling incrementally pub incr_comp_session_dir: Option, // Channel back to the main control thread to send messages to - pub coordinator_send: Sender, + coordinator_send: Sender, } impl CodegenContext { @@ -660,17 +660,17 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { sess.opts.output_types.contains_key(&OutputType::Exe) } -pub fn run_passes(sess: &Session, - crate_output: &OutputFilenames, - crate_name: Symbol, - link: LinkMeta, - metadata: EncodedMetadata, - exported_symbols: Arc, - no_builtins: bool, - windows_subsystem: Option, - linker_info: LinkerInfo, - no_integrated_as: bool) - -> OngoingCrateTranslation { +pub fn start_async_translation(sess: &Session, + crate_output: &OutputFilenames, + crate_name: Symbol, + link: LinkMeta, + metadata: EncodedMetadata, + exported_symbols: Arc, + no_builtins: bool, + windows_subsystem: Option, + linker_info: LinkerInfo, + no_integrated_as: bool) + -> OngoingCrateTranslation { let output_types_override = if no_integrated_as { OutputTypes::new(&[(OutputType::Assembly, None)]) } else { @@ -1061,7 +1061,7 @@ fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) } #[derive(Debug)] -pub enum Message { +enum Message { Token(io::Result), Done { result: Result }, WorkItem(WorkItem), @@ -1069,8 +1069,7 @@ pub enum Message { TranslationDone, } - -pub struct Diagnostic { +struct Diagnostic { msg: String, code: Option, lvl: Level, @@ -1519,14 +1518,14 @@ impl SharedEmitterMain { } pub struct OngoingCrateTranslation { - pub crate_name: Symbol, - pub link: LinkMeta, - pub metadata: EncodedMetadata, - pub 
exported_symbols: Arc, - pub no_builtins: bool, - pub windows_subsystem: Option, - pub linker_info: LinkerInfo, - pub no_integrated_as: bool, + crate_name: Symbol, + link: LinkMeta, + metadata: EncodedMetadata, + exported_symbols: Arc, + no_builtins: bool, + windows_subsystem: Option, + linker_info: LinkerInfo, + no_integrated_as: bool, output_filenames: OutputFilenames, regular_module_config: ModuleConfig, diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 0137fa086932..2e6093eb1ca3 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -962,7 +962,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, !tcx.sess.opts.output_types.should_trans() { let empty_exported_symbols = ExportedSymbols::empty(); let linker_info = LinkerInfo::new(&shared_ccx, &empty_exported_symbols); - let ongoing_translation = write::run_passes( + let ongoing_translation = write::start_async_translation( tcx.sess, output_filenames, tcx.crate_name(LOCAL_CRATE), @@ -1012,7 +1012,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, (output_filenames.outputs.contains_key(&OutputType::Object) || output_filenames.outputs.contains_key(&OutputType::Exe))); - let ongoing_translation = write::run_passes( + let ongoing_translation = write::start_async_translation( tcx.sess, output_filenames, tcx.crate_name(LOCAL_CRATE), From 1480be37795bd25b7d7cedf9e1ef5caf985fe38c Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Thu, 27 Jul 2017 11:51:27 +0200 Subject: [PATCH 099/213] async-llvm(21): Re-use worker-ids in order to simulate persistent worker threads. 
--- src/librustc_trans/back/write.rs | 35 ++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index f792da01cde8..086980777e1d 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -1063,7 +1063,10 @@ fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) #[derive(Debug)] enum Message { Token(io::Result), - Done { result: Result }, + Done { + result: Result, + worker_id: usize, + }, WorkItem(WorkItem), CheckErrorMessages, TranslationDone, @@ -1179,6 +1182,18 @@ fn start_executing_work(sess: &Session, // the jobserver. thread::spawn(move || { + let mut worker_id_counter = 0; + let mut free_worker_ids = Vec::new(); + let mut get_worker_id = |free_worker_ids: &mut Vec| { + if let Some(id) = free_worker_ids.pop() { + id + } else { + let id = worker_id_counter; + worker_id_counter += 1; + id + } + }; + let mut compiled_modules = vec![]; let mut compiled_metadata_module = None; let mut compiled_allocator_module = None; @@ -1186,17 +1201,19 @@ fn start_executing_work(sess: &Session, let mut translation_done = false; let mut work_items = Vec::new(); let mut tokens = Vec::new(); + let mut running = 0; + while !translation_done || work_items.len() > 0 || running > 0 { // Spin up what work we can, only doing this while we've got available // parallelism slots and work left to spawn. while work_items.len() > 0 && running < tokens.len() + 1 { let item = work_items.pop().unwrap(); - let worker_index = work_items.len(); + let worker_id = get_worker_id(&mut free_worker_ids); let cgcx = CodegenContext { - worker: worker_index, + worker: worker_id, .. cgcx.clone() }; @@ -1235,9 +1252,10 @@ fn start_executing_work(sess: &Session, // // Note that if the thread failed that means it panicked, so we // abort immediately. 
- Message::Done { result: Ok(compiled_module) } => { + Message::Done { result: Ok(compiled_module), worker_id } => { drop(tokens.pop()); running -= 1; + free_worker_ids.push(worker_id); drop(trans_worker_send.send(Message::CheckErrorMessages)); match compiled_module.kind { @@ -1254,7 +1272,7 @@ fn start_executing_work(sess: &Session, } } } - Message::Done { result: Err(()) } => { + Message::Done { result: Err(()), worker_id: _ } => { shared_emitter.fatal("aborting due to worker thread panic"); drop(trans_worker_send.send(Message::CheckErrorMessages)); // Exit the coordinator thread @@ -1288,6 +1306,7 @@ fn spawn_work(cgcx: CodegenContext, work: WorkItem) { struct Bomb { coordinator_send: Sender, result: Option, + worker_id: usize, } impl Drop for Bomb { fn drop(&mut self) { @@ -1296,13 +1315,17 @@ fn spawn_work(cgcx: CodegenContext, work: WorkItem) { None => Err(()) }; - drop(self.coordinator_send.send(Message::Done { result })); + drop(self.coordinator_send.send(Message::Done { + result, + worker_id: self.worker_id, + })); } } let mut bomb = Bomb { coordinator_send: cgcx.coordinator_send.clone(), result: None, + worker_id: cgcx.worker, }; // Execute the work itself, and if it finishes successfully then flag From 88192785233d0fed6cc8702e3067f02208e62a14 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Thu, 27 Jul 2017 13:02:31 +0200 Subject: [PATCH 100/213] async-llvm(22): mw invokes mad html skillz to produce graphical LLVM timing reports. 
--- src/librustc/session/config.rs | 2 + src/librustc_trans/back/write.rs | 26 +++++ src/librustc_trans/base.rs | 14 +++ src/librustc_trans/lib.rs | 1 + src/librustc_trans/time_graph.rs | 181 +++++++++++++++++++++++++++++++ 5 files changed, 224 insertions(+) create mode 100644 src/librustc_trans/time_graph.rs diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 8c4cc20deb7b..4a9fbbe6f157 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -1059,6 +1059,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "choose which RELRO level to use"), nll: bool = (false, parse_bool, [UNTRACKED], "run the non-lexical lifetimes MIR pass"), + trans_time_graph: bool = (false, parse_bool, [UNTRACKED], + "generate a graphical HTML report of time spent in trans and LLVM"), } pub fn default_lib_output() -> CrateType { diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 086980777e1d..a3845cf0e8e7 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -17,6 +17,7 @@ use rustc::middle::cstore::{LinkMeta, EncodedMetadata}; use rustc::session::config::{self, OutputFilenames, OutputType, OutputTypes, Passes, SomePasses, AllPasses, Sanitizer}; use rustc::session::Session; +use time_graph::{self, TimeGraph}; use llvm; use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef}; use llvm::SMDiagnosticRef; @@ -342,6 +343,9 @@ pub struct CodegenContext { pub incr_comp_session_dir: Option, // Channel back to the main control thread to send messages to coordinator_send: Sender, + // A reference to the TimeGraph so we can register timings. None means that + // measuring is disabled. 
+ time_graph: Option, } impl CodegenContext { @@ -662,6 +666,7 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { pub fn start_async_translation(sess: &Session, crate_output: &OutputFilenames, + time_graph: Option, crate_name: Symbol, link: LinkMeta, metadata: EncodedMetadata, @@ -768,6 +773,7 @@ pub fn start_async_translation(sess: &Session, coordinator_send.clone(), coordinator_receive, client, + time_graph.clone(), exported_symbols.clone()); OngoingCrateTranslation { crate_name, @@ -783,6 +789,7 @@ pub fn start_async_translation(sess: &Session, metadata_module_config: metadata_config, allocator_module_config: allocator_config, + time_graph, output_filenames: crate_output.clone(), coordinator_send, shared_emitter_main, @@ -1084,6 +1091,7 @@ fn start_executing_work(sess: &Session, coordinator_send: Sender, coordinator_receive: Receiver, jobserver: Client, + time_graph: Option, exported_symbols: Arc) -> thread::JoinHandle { // First up, convert our jobserver into a helper thread so we can use normal @@ -1123,6 +1131,7 @@ fn start_executing_work(sess: &Session, incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), coordinator_send: coordinator_send, diag_emitter: shared_emitter.clone(), + time_graph, }; // This is the "main loop" of parallel work happening for parallel codegen. 
@@ -1295,10 +1304,22 @@ fn start_executing_work(sess: &Session, }) } +pub const TRANS_WORKER_ID: usize = ::std::usize::MAX; +pub const TRANS_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(TRANS_WORKER_ID); +pub const TRANS_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); +const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); + fn spawn_work(cgcx: CodegenContext, work: WorkItem) { let depth = time_depth(); thread::spawn(move || { + let _timing_guard = cgcx.time_graph + .as_ref() + .map(|tg| tg.start(time_graph::TimelineId(cgcx.worker), + LLVM_WORK_PACKAGE_KIND)); set_time_depth(depth); // Set up a destructor which will fire off a message that we're done as @@ -1555,6 +1576,7 @@ pub struct OngoingCrateTranslation { metadata_module_config: ModuleConfig, allocator_module_config: ModuleConfig, + time_graph: Option, coordinator_send: Sender, shared_emitter_main: SharedEmitterMain, future: thread::JoinHandle, @@ -1567,6 +1589,10 @@ impl OngoingCrateTranslation { sess.abort_if_errors(); + if let Some(time_graph) = self.time_graph { + time_graph.dump(&format!("{}-timings", self.crate_name)); + } + copy_module_artifacts_into_incr_comp_cache(sess, &compiled_modules, &self.output_filenames); diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 2e6093eb1ca3..0b82ac71c33d 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -69,6 +69,7 @@ use mir; use monomorphize::{self, Instance}; use partitioning::{self, PartitioningStrategy, CodegenUnit}; use symbol_names_test; +use time_graph; use trans_item::{TransItem, DefPathBasedNames}; use type_::Type; use type_of; @@ -956,6 +957,11 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }; let no_builtins = attr::contains_name(&krate.attrs, "no_builtins"); + let time_graph = if 
tcx.sess.opts.debugging_opts.trans_time_graph { + Some(time_graph::TimeGraph::new()) + } else { + None + }; // Skip crate items and just output metadata in -Z no-trans mode. if tcx.sess.opts.debugging_opts.no_trans || @@ -965,6 +971,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let ongoing_translation = write::start_async_translation( tcx.sess, output_filenames, + time_graph.clone(), tcx.crate_name(LOCAL_CRATE), link_meta, metadata, @@ -1015,6 +1022,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let ongoing_translation = write::start_async_translation( tcx.sess, output_filenames, + time_graph.clone(), tcx.crate_name(LOCAL_CRATE), link_meta, metadata, @@ -1033,6 +1041,12 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, for cgu in codegen_units.into_iter() { ongoing_translation.check_for_errors(tcx.sess); + + let _timing_guard = time_graph + .as_ref() + .map(|time_graph| time_graph.start(write::TRANS_WORKER_TIMELINE, + write::TRANS_WORK_PACKAGE_KIND)); + let dep_node = cgu.work_product_dep_node(); let ((stats, module), _) = tcx.dep_graph.with_task(dep_node, diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 62ff1535be95..83835cb794ab 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -125,6 +125,7 @@ mod mir; mod monomorphize; mod partitioning; mod symbol_names_test; +mod time_graph; mod trans_item; mod tvec; mod type_; diff --git a/src/librustc_trans/time_graph.rs b/src/librustc_trans/time_graph.rs new file mode 100644 index 000000000000..e0ebe8a0933f --- /dev/null +++ b/src/librustc_trans/time_graph.rs @@ -0,0 +1,181 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::collections::HashMap; +use std::marker::PhantomData; +use std::sync::{Arc, Mutex}; +use std::time::Instant; +use std::io::prelude::*; +use std::fs::File; + +const OUTPUT_WIDTH_IN_PX: u64 = 1000; +const TIME_LINE_HEIGHT_IN_PX: u64 = 7; +const TIME_LINE_HEIGHT_STRIDE_IN_PX: usize = 10; + +#[derive(Clone)] +struct Timing { + start: Instant, + end: Instant, + work_package_kind: WorkPackageKind, +} + +#[derive(Clone, Copy, Hash, Eq, PartialEq, Debug)] +pub struct TimelineId(pub usize); + +#[derive(Clone)] +struct PerThread { + timings: Vec, + open_work_package: Option<(Instant, WorkPackageKind)>, +} + +#[derive(Clone)] +pub struct TimeGraph { + data: Arc>>, +} + +#[derive(Clone, Copy)] +pub struct WorkPackageKind(pub &'static [&'static str]); + +pub struct RaiiToken { + graph: TimeGraph, + timeline: TimelineId, + // The token must not be Send: + _marker: PhantomData<*const ()> +} + + +impl Drop for RaiiToken { + fn drop(&mut self) { + self.graph.end(self.timeline); + } +} + +impl TimeGraph { + pub fn new() -> TimeGraph { + TimeGraph { + data: Arc::new(Mutex::new(HashMap::new())) + } + } + + pub fn start(&self, + timeline: TimelineId, + work_package_kind: WorkPackageKind) -> RaiiToken { + { + let mut table = self.data.lock().unwrap(); + + let mut data = table.entry(timeline).or_insert(PerThread { + timings: Vec::new(), + open_work_package: None, + }); + + assert!(data.open_work_package.is_none()); + data.open_work_package = Some((Instant::now(), work_package_kind)); + } + + RaiiToken { + graph: self.clone(), + timeline, + _marker: PhantomData, + } + } + + fn end(&self, timeline: TimelineId) { + let end = Instant::now(); + + let mut table = self.data.lock().unwrap(); + let mut data = table.get_mut(&timeline).unwrap(); + + if let Some((start, work_package_kind)) = data.open_work_package { + data.timings.push(Timing { + start, + end, + work_package_kind, + }); + } else { + bug!("end timing without start?") + } + + data.open_work_package = None; + } + + pub fn 
dump(&self, output_filename: &str) { + let table = self.data.lock().unwrap(); + + for data in table.values() { + assert!(data.open_work_package.is_none()); + } + + let mut timelines: Vec = + table.values().map(|data| data.clone()).collect(); + + timelines.sort_by_key(|timeline| timeline.timings[0].start); + + let earliest_instant = timelines[0].timings[0].start; + let latest_instant = timelines.iter() + .map(|timeline| timeline.timings + .last() + .unwrap() + .end) + .max() + .unwrap(); + let max_distance = distance(earliest_instant, latest_instant); + + let mut file = File::create(format!("{}.html", output_filename)).unwrap(); + + writeln!(file, "").unwrap(); + writeln!(file, "").unwrap(); + writeln!(file, "").unwrap(); + + let mut color = 0; + + for (line_index, timeline) in timelines.iter().enumerate() { + let line_top = line_index * TIME_LINE_HEIGHT_STRIDE_IN_PX; + + for span in &timeline.timings { + let start = distance(earliest_instant, span.start); + let end = distance(earliest_instant, span.end); + + let start = normalize(start, max_distance, OUTPUT_WIDTH_IN_PX); + let end = normalize(end, max_distance, OUTPUT_WIDTH_IN_PX); + + let colors = span.work_package_kind.0; + + writeln!(file, "
", + line_top, + start, + end - start, + TIME_LINE_HEIGHT_IN_PX, + colors[color % colors.len()] + ).unwrap(); + + color += 1; + } + } + + writeln!(file, "").unwrap(); + writeln!(file, "").unwrap(); + } +} + +fn distance(zero: Instant, x: Instant) -> u64 { + + let duration = x.duration_since(zero); + (duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64) // / div +} + +fn normalize(distance: u64, max: u64, max_pixels: u64) -> u64 { + (max_pixels * distance) / max +} + From f5acc392e0b28295ccaff6135e12fab219b0b006 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Thu, 27 Jul 2017 16:59:30 +0200 Subject: [PATCH 101/213] async-llvm(23): Let the main thread also do LLVM work in order to reduce memory pressure. --- src/librustc_trans/back/write.rs | 177 +++++++++++++++++++++++++------ src/librustc_trans/base.rs | 135 ++++++++++++----------- 2 files changed, 216 insertions(+), 96 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index a3845cf0e8e7..649b16f17a92 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -764,7 +764,7 @@ pub fn start_async_translation(sess: &Session, }); let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); - let (trans_worker_send, _trans_worker_receive) = channel(); + let (trans_worker_send, trans_worker_receive) = channel(); let (coordinator_send, coordinator_receive) = channel(); let coordinator_thread = start_executing_work(sess, @@ -792,6 +792,7 @@ pub fn start_async_translation(sess: &Session, time_graph, output_filenames: crate_output.clone(), coordinator_send, + trans_worker_receive, shared_emitter_main, future: coordinator_thread } @@ -987,7 +988,7 @@ pub fn dump_incremental_data(trans: &CrateTranslation) { eprintln!("incremental: re-using {} out of {} modules", reuse, trans.modules.len()); } -pub struct WorkItem { +struct WorkItem { mtrans: ModuleTranslation, config: ModuleConfig, output_names: OutputFilenames @@ -1074,9 
+1075,11 @@ enum Message { result: Result, worker_id: usize, }, - WorkItem(WorkItem), - CheckErrorMessages, - TranslationDone, + TranslationDone { + llvm_work_item: WorkItem, + is_last: bool + }, + TranslateItem, } struct Diagnostic { @@ -1085,6 +1088,13 @@ struct Diagnostic { lvl: Level, } +#[derive(PartialEq, Clone, Copy, Debug)] +enum TransWorkerState { + Idle, + Translating, + LLVMing, +} + fn start_executing_work(sess: &Session, shared_emitter: SharedEmitter, trans_worker_send: Sender, @@ -1189,7 +1199,6 @@ fn start_executing_work(sess: &Session, // Before that work finishes, however, we may acquire a token. In that case // we actually wastefully acquired the token, so we relinquish it back to // the jobserver. - thread::spawn(move || { let mut worker_id_counter = 0; let mut free_worker_ids = Vec::new(); @@ -1211,13 +1220,74 @@ fn start_executing_work(sess: &Session, let mut work_items = Vec::new(); let mut tokens = Vec::new(); + let mut trans_worker_state = TransWorkerState::Idle; let mut running = 0; - while !translation_done || work_items.len() > 0 || running > 0 { + while !translation_done || + work_items.len() > 0 || + running > 0 || + trans_worker_state != TransWorkerState::Idle { + + if !translation_done { + if trans_worker_state == TransWorkerState::Idle { + // Translation is not done yet, so there are two things the + // translation worker could do: + // + // (1) Translate another CGU + // (2) Run an already translated CGU through LLVM + // + // Option (2) makes sense if there's already enough work for + // all the other workers. In that case it's better to run + // a CGU through LLVM, so its resources can be freed. 
+ // + // However, it's not trivial to determines what "enough work + // for all the other workers" means because: + // + // (1) We don't know how long the currently working workers + // will need to finish their work package, and + // (2) we don't know how many idle workers would be available + // because that is dynamically decided by the jobserver. + // + // TODO: Come up with a useful heuristic. + if work_items.len() <= 4 { + trans_worker_send.send(Message::TranslateItem).unwrap(); + trans_worker_state = TransWorkerState::Translating; + } else { + let item = work_items.pop().unwrap(); + let cgcx = CodegenContext { + worker: TRANS_WORKER_ID, + .. cgcx.clone() + }; + trans_worker_state = TransWorkerState::LLVMing; + spawn_work(cgcx, item); + } + } + } else { + match trans_worker_state { + TransWorkerState::Idle => { + if let Some(item) = work_items.pop() { + let cgcx = CodegenContext { + worker: TRANS_WORKER_ID, + .. cgcx.clone() + }; + + trans_worker_state = TransWorkerState::LLVMing; + spawn_work(cgcx, item); + } + } + TransWorkerState::Translating => { + bug!("trans worker should not be translating after \ + translation was already completed") + } + TransWorkerState::LLVMing => { + // Already making good use of that token + } + } + } // Spin up what work we can, only doing this while we've got available // parallelism slots and work left to spawn. 
- while work_items.len() > 0 && running < tokens.len() + 1 { + while work_items.len() > 0 && running < tokens.len() { let item = work_items.pop().unwrap(); let worker_id = get_worker_id(&mut free_worker_ids); @@ -1231,7 +1301,7 @@ fn start_executing_work(sess: &Session, } // Relinquish accidentally acquired extra tokens - tokens.truncate(running.saturating_sub(1)); + tokens.truncate(running); match coordinator_receive.recv().unwrap() { // Save the token locally and the next turn of the loop will use @@ -1242,15 +1312,25 @@ fn start_executing_work(sess: &Session, tokens.push(token); } else { shared_emitter.fatal("failed to acquire jobserver token"); - drop(trans_worker_send.send(Message::CheckErrorMessages)); // Exit the coordinator thread panic!() } } - Message::WorkItem(work_item) => { - work_items.push(work_item); - helper.request_token(); + Message::TranslationDone { llvm_work_item, is_last } => { + work_items.insert(0, llvm_work_item); + + if is_last { + // If this is the last, don't request a token because + // the trans worker thread will be free to handle this + // immediately. + translation_done = true; + } else { + helper.request_token(); + } + + assert_eq!(trans_worker_state, TransWorkerState::Translating); + trans_worker_state = TransWorkerState::Idle; } // If a thread exits successfully then we drop a token associated @@ -1262,10 +1342,14 @@ fn start_executing_work(sess: &Session, // Note that if the thread failed that means it panicked, so we // abort immediately. 
Message::Done { result: Ok(compiled_module), worker_id } => { - drop(tokens.pop()); - running -= 1; - free_worker_ids.push(worker_id); - drop(trans_worker_send.send(Message::CheckErrorMessages)); + if worker_id == TRANS_WORKER_ID { + assert_eq!(trans_worker_state, TransWorkerState::LLVMing); + trans_worker_state = TransWorkerState::Idle; + } else { + drop(tokens.pop()); + running -= 1; + free_worker_ids.push(worker_id); + } match compiled_module.kind { ModuleKind::Regular => { @@ -1283,15 +1367,11 @@ fn start_executing_work(sess: &Session, } Message::Done { result: Err(()), worker_id: _ } => { shared_emitter.fatal("aborting due to worker thread panic"); - drop(trans_worker_send.send(Message::CheckErrorMessages)); // Exit the coordinator thread panic!() } - Message::TranslationDone => { - translation_done = true; - } - msg @ Message::CheckErrorMessages => { - bug!("unexpected message: {:?}", msg); + Message::TranslateItem => { + bug!("the coordinator should not receive translation requests") } } } @@ -1316,10 +1396,6 @@ fn spawn_work(cgcx: CodegenContext, work: WorkItem) { let depth = time_depth(); thread::spawn(move || { - let _timing_guard = cgcx.time_graph - .as_ref() - .map(|tg| tg.start(time_graph::TimelineId(cgcx.worker), - LLVM_WORK_PACKAGE_KIND)); set_time_depth(depth); // Set up a destructor which will fire off a message that we're done as @@ -1362,7 +1438,13 @@ fn spawn_work(cgcx: CodegenContext, work: WorkItem) { // we just ignore the result and then send off our message saying that // we're done, which if `execute_work_item` failed is unlikely to be // seen by the main thread, but hey we might as well try anyway. 
- bomb.result = Some(execute_work_item(&cgcx, work).unwrap()); + bomb.result = { + let _timing_guard = cgcx.time_graph + .as_ref() + .map(|tg| tg.start(time_graph::TimelineId(cgcx.worker), + LLVM_WORK_PACKAGE_KIND)); + Some(execute_work_item(&cgcx, work).unwrap()) + }; }); } @@ -1578,6 +1660,7 @@ pub struct OngoingCrateTranslation { time_graph: Option, coordinator_send: Sender, + trans_worker_receive: Receiver, shared_emitter_main: SharedEmitterMain, future: thread::JoinHandle, } @@ -1645,25 +1728,49 @@ impl OngoingCrateTranslation { pub fn submit_translated_module_to_llvm(&self, sess: &Session, - mtrans: ModuleTranslation) { + mtrans: ModuleTranslation, + is_last: bool) { let module_config = match mtrans.kind { ModuleKind::Regular => self.regular_module_config.clone(sess), ModuleKind::Metadata => self.metadata_module_config.clone(sess), ModuleKind::Allocator => self.allocator_module_config.clone(sess), }; - let work_item = build_work_item(mtrans, - module_config, - self.output_filenames.clone()); + let llvm_work_item = build_work_item(mtrans, + module_config, + self.output_filenames.clone()); - drop(self.coordinator_send.send(Message::WorkItem(work_item))); + drop(self.coordinator_send.send(Message::TranslationDone { + llvm_work_item, + is_last + })); } - pub fn signal_translation_done(&self) { - drop(self.coordinator_send.send(Message::TranslationDone)); + pub fn submit_pre_translated_module_to_llvm(&self, + sess: &Session, + mtrans: ModuleTranslation, + is_last: bool) { + self.wait_for_signal_to_translate_item(); + self.check_for_errors(sess); + self.submit_translated_module_to_llvm(sess, mtrans, is_last); } pub fn check_for_errors(&self, sess: &Session) { self.shared_emitter_main.check(sess, false); } + + pub fn wait_for_signal_to_translate_item(&self) { + match self.trans_worker_receive.recv() { + Ok(Message::TranslateItem) => { + // Nothing to do + } + Ok(message) => { + panic!("unexpected message: {:?}", message) + } + Err(_) => { + // One of the LLVM 
threads must have panicked, fall through so + // error handling can be reached. + } + } + } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 0b82ac71c33d..2d1f43aff571 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -981,14 +981,15 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, linker_info, false); - ongoing_translation.submit_translated_module_to_llvm(tcx.sess, metadata_module); - ongoing_translation.signal_translation_done(); + ongoing_translation.submit_pre_translated_module_to_llvm(tcx.sess, metadata_module, true); assert_and_save_dep_graph(tcx, incremental_hashes_map, metadata_incr_hashes, link_meta); + ongoing_translation.check_for_errors(tcx.sess); + return ongoing_translation; } @@ -1032,35 +1033,87 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, linker_info, no_integrated_as); - ongoing_translation.submit_translated_module_to_llvm(tcx.sess, metadata_module); + // Translate an allocator shim, if any + // + // If LTO is enabled and we've got some previous LLVM module we translated + // above, then we can just translate directly into that LLVM module. If not, + // however, we need to create a separate module and trans into that. Note + // that the separate translation is critical for the standard library where + // the rlib's object file doesn't have allocator functions but the dylib + // links in an object file that has allocator functions. When we're + // compiling a final LTO artifact, though, there's no need to worry about + // this as we're not working with this dual "rlib/dylib" functionality. 
+ let allocator_module = if tcx.sess.lto() { + None + } else if let Some(kind) = tcx.sess.allocator_kind.get() { + unsafe { + let (llcx, llmod) = + context::create_context_and_module(tcx.sess, "allocator"); + let modules = ModuleLlvm { + llmod: llmod, + llcx: llcx, + }; + time(tcx.sess.time_passes(), "write allocator module", || { + allocator::trans(tcx, &modules, kind) + }); + + Some(ModuleTranslation { + name: link::ALLOCATOR_MODULE_NAME.to_string(), + symbol_name_hash: 0, // we always rebuild allocator shims + source: ModuleSource::Translated(modules), + kind: ModuleKind::Allocator, + }) + } + } else { + None + }; + + if let Some(allocator_module) = allocator_module { + ongoing_translation.submit_pre_translated_module_to_llvm(tcx.sess, allocator_module, false); + } + + let codegen_unit_count = codegen_units.len(); + ongoing_translation.submit_pre_translated_module_to_llvm(tcx.sess, + metadata_module, + codegen_unit_count == 0); let translation_items = Arc::new(translation_items); let mut all_stats = Stats::default(); let mut module_dispositions = tcx.sess.opts.incremental.as_ref().map(|_| Vec::new()); - for cgu in codegen_units.into_iter() { + for (cgu_index, cgu) in codegen_units.into_iter().enumerate() { + ongoing_translation.wait_for_signal_to_translate_item(); ongoing_translation.check_for_errors(tcx.sess); - let _timing_guard = time_graph - .as_ref() - .map(|time_graph| time_graph.start(write::TRANS_WORKER_TIMELINE, - write::TRANS_WORK_PACKAGE_KIND)); + let module = { + let _timing_guard = time_graph + .as_ref() + .map(|time_graph| time_graph.start(write::TRANS_WORKER_TIMELINE, + write::TRANS_WORK_PACKAGE_KIND)); + let dep_node = cgu.work_product_dep_node(); + let ((stats, module), _) = + tcx.dep_graph.with_task(dep_node, + AssertDepGraphSafe(&shared_ccx), + AssertDepGraphSafe((cgu, + translation_items.clone(), + exported_symbols.clone())), + module_translation); + all_stats.extend(stats); - let dep_node = cgu.work_product_dep_node(); - let ((stats, 
module), _) = - tcx.dep_graph.with_task(dep_node, - AssertDepGraphSafe(&shared_ccx), - AssertDepGraphSafe((cgu, - translation_items.clone(), - exported_symbols.clone())), - module_translation); - all_stats.extend(stats); + if let Some(ref mut module_dispositions) = module_dispositions { + module_dispositions.push(module.disposition()); + } - if let Some(ref mut module_dispositions) = module_dispositions { - module_dispositions.push(module.disposition()); - } - ongoing_translation.submit_translated_module_to_llvm(tcx.sess, module); + module + }; + + let is_last_cgu = (cgu_index + 1) == codegen_unit_count; + + ongoing_translation.submit_translated_module_to_llvm(tcx.sess, + module, + is_last_cgu); + ongoing_translation.check_for_errors(tcx.sess); } if let Some(module_dispositions) = module_dispositions { @@ -1229,47 +1282,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } - // Translate an allocator shim, if any - // - // If LTO is enabled and we've got some previous LLVM module we translated - // above, then we can just translate directly into that LLVM module. If not, - // however, we need to create a separate module and trans into that. Note - // that the separate translation is critical for the standard library where - // the rlib's object file doesn't have allocator functions but the dylib - // links in an object file that has allocator functions. When we're - // compiling a final LTO artifact, though, there's no need to worry about - // this as we're not working with this dual "rlib/dylib" functionality. 
- let allocator_module = if tcx.sess.lto() { - None - } else if let Some(kind) = tcx.sess.allocator_kind.get() { - unsafe { - let (llcx, llmod) = - context::create_context_and_module(tcx.sess, "allocator"); - let modules = ModuleLlvm { - llmod: llmod, - llcx: llcx, - }; - time(tcx.sess.time_passes(), "write allocator module", || { - allocator::trans(tcx, &modules, kind) - }); - - Some(ModuleTranslation { - name: link::ALLOCATOR_MODULE_NAME.to_string(), - symbol_name_hash: 0, // we always rebuild allocator shims - source: ModuleSource::Translated(modules), - kind: ModuleKind::Allocator, - }) - } - } else { - None - }; - - if let Some(allocator_module) = allocator_module { - ongoing_translation.submit_translated_module_to_llvm(tcx.sess, allocator_module); - } - ongoing_translation.check_for_errors(tcx.sess); - ongoing_translation.signal_translation_done(); assert_and_save_dep_graph(tcx, incremental_hashes_map, From bd36df84a57f2719e99c691e7ed23d0264836d41 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Fri, 28 Jul 2017 14:28:08 +0200 Subject: [PATCH 102/213] async-llvm(24): Improve scheduling and documentation. 
--- src/Cargo.lock | 2 +- src/librustc_trans/Cargo.toml | 2 +- src/librustc_trans/back/write.rs | 279 +++++++++++++++++++++---------- src/librustc_trans/base.rs | 21 +++ src/librustc_trans/lib.rs | 2 +- 5 files changed, 215 insertions(+), 91 deletions(-) diff --git a/src/Cargo.lock b/src/Cargo.lock index 18d97972cd3e..31742023d46f 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1517,11 +1517,11 @@ dependencies = [ name = "rustc_trans" version = "0.0.0" dependencies = [ - "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", "jobserver 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc-demangle 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/src/librustc_trans/Cargo.toml b/src/librustc_trans/Cargo.toml index c7db2a9a8ae7..ed9321cc3f3a 100644 --- a/src/librustc_trans/Cargo.toml +++ b/src/librustc_trans/Cargo.toml @@ -10,7 +10,7 @@ crate-type = ["dylib"] test = false [dependencies] -crossbeam = "0.2" +num_cpus = "1.0" flate2 = "0.2" jobserver = "0.1.5" log = "0.3" diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 649b16f17a92..4e68fa8ce40c 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -1077,7 +1077,8 @@ enum Message { }, TranslationDone { llvm_work_item: WorkItem, - is_last: bool + cost: u64, + is_last: bool, }, TranslateItem, } @@ -1089,7 +1090,7 @@ struct Diagnostic { } #[derive(PartialEq, Clone, Copy, Debug)] -enum TransWorkerState { +enum MainThreadWorkerState { Idle, Translating, LLVMing, @@ -1148,16 +1149,110 @@ fn 
start_executing_work(sess: &Session, // It's here that we manage parallelism, schedule work, and work with // messages coming from clients. // - // Our channel `rx` created above is a channel of messages coming from our - // various worker threads. This includes the jobserver helper thread above - // as well as the work we'll spawn off here. Each turn of this loop starts - // off by trying to spawn as much work as possible. After we've done that we - // then wait for an event and dispatch accordingly once the event is - // received. We're only done once all our work items have been drained and - // nothing is running, at which point we return back up the stack. + // There are a few environmental pre-conditions that shape how the system + // is set up: // - // ## Parallelism management + // - Error reporting only can happen on the main thread because that's the + // only place where we have access to the compiler `Session`. + // - LLVM work can be done on any thread. + // - Translation can only happen on the main thread. + // - Each thread doing substantial work most be in possession of a `Token` + // from the `Jobserver`. + // - The compiler process always holds one `Token`. Any additional `Tokens` + // have to be requested from the `Jobserver`. // + // Error Reporting + // =============== + // The error reporting restriction is handled separately from the rest: We + // set up a `SharedEmitter` the holds an open channel to the main thread. + // When an error occurs on any thread, the shared emitter will send the + // error message to the receiver main thread (`SharedEmitterMain`). The + // main thread will periodically query this error message queue and emit + // any error messages it has received. It might even abort compilation if + // has received a fatal error. In this case we rely on all other threads + // being torn down automatically with the main thread. 
+ // Since the main thread will often be busy doing translation work, error + // reporting will be somewhat delayed, since the message queue can only be + // checked in between to work packages. + // + // Work Processing Infrastructure + // ============================== + // The work processing infrastructure knows three major actors: + // + // - the coordinator thread, + // - the main thread, and + // - LLVM worker threads + // + // The coordinator thread is running a message loop. It instructs the main + // thread about what work to do when, and it will spawn off LLVM worker + // threads as open LLVM WorkItems become available. + // + // The job of the main thread is to translate CGUs into LLVM work package + // (since the main thread is the only thread that can do this). The main + // thread will block until it receives a message from the coordinator, upon + // which it will translate one CGU, send it to the coordinator and block + // again. This way the coordinator can control what the main thread is + // doing. + // + // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is + // available, it will spawn off a new LLVM worker thread and let it process + // that a WorkItem. When a LLVM worker thread is done with its WorkItem, + // it will just shut down, which also frees all resources associated with + // the given LLVM module, and sends a message to the coordinator that the + // has been completed. + // + // Work Scheduling + // =============== + // The scheduler's goal is to minimize the time it takes to complete all + // work there is, however, we also want to keep memory consumption low + // if possible. These two goals are at odds with each other: If memory + // consumption were not an issue, we could just let the main thread produce + // LLVM WorkItems at full speed, assuring maximal utilization of + // Tokens/LLVM worker threads. 
However, since translation usual is faster + // than LLVM processing, the queue of LLVM WorkItems would fill up and each + // WorkItem potentially holds on to a substantial amount of memory. + // + // So the actual goal is to always produce just enough LLVM WorkItems as + // not to starve our LLVM worker threads. That means, once we have enough + // WorkItems in our queue, we can block the main thread, so it does not + // produce more until we need them. + // + // Doing LLVM Work on the Main Thread + // ---------------------------------- + // Since the main thread owns the compiler processes implicit `Token`, it is + // wasteful to keep it blocked without doing any work. Therefore, what we do + // in this case is: We spawn off an additional LLVM worker thread that helps + // reduce the queue. The work it is doing corresponds to the implicit + // `Token`. The coordinator will mark the main thread as being busy with + // LLVM work. (The actual work happens on another OS thread but we just care + // about `Tokens`, not actual threads). + // + // When any LLVM worker thread finishes while the main thread is marked as + // "busy with LLVM work", we can do a little switcheroo: We give the Token + // of the just finished thread to the LLVM worker thread that is working on + // behalf of the main thread's implicit Token, thus freeing up the main + // thread again. The coordinator can then again decide what the main thread + // should do. This allows the coordinator to make decisions at more points + // in time. + // + // Striking a Balance between Throughput and Memory Consumption + // ------------------------------------------------------------ + // Since our two goals, (1) use as many Tokens as possible and (2) keep + // memory consumption as low as possible, are in conflict with each other, + // we have to find a trade off between them. 
Right now, the goal is to keep + // all workers busy, which means that no worker should find the queue empty + // when it is ready to start. + // How do we do achieve this? Good question :) We actually never know how + // many `Tokens` are potentially available so it's hard to say how much to + // fill up the queue before switching the main thread to LLVM work. Also we + // currently don't have a means to estimate how long a running LLVM worker + // will still be busy with it's current WorkItem. However, we know the + // maximal count of available Tokens that makes sense (=the number of CPU + // cores), so we can take a conservative guess. The heuristic we use here + // is implemented in the `queue_full_enough()` function. + // + // Some Background on Jobservers + // ----------------------------- // It's worth also touching on the management of parallelism here. We don't // want to just spawn a thread per work item because while that's optimal // parallelism it may overload a system with too many threads or violate our @@ -1170,36 +1265,8 @@ fn start_executing_work(sess: &Session, // and whenever we're done with that work we release the semaphore. In this // manner we can ensure that the maximum number of parallel workers is // capped at any one point in time. - // - // The jobserver protocol is a little unique, however. We, as a running - // process, already have an ephemeral token assigned to us. We're not going - // to be doing any productive work in this thread though so we're going to - // give this token to a worker thread (there's no actual token to give, this - // is just conceptually). As a result you'll see a few `+1` and `-1` - // instances below, and it's about working with this ephemeral token. - // - // To acquire tokens we have our `helper` thread above which is just in a - // loop acquiring tokens and sending them to us. We then store all tokens - // locally in a `tokens` vector once they're acquired. 
Currently we don't - // literally send a token to a worker thread to assist with management of - // our "ephemeral token". - // - // As a result, our "spawn as much work as possible" basically means that we - // fill up the `running` counter up to the limit of the `tokens` list. - // Whenever we get a new token this'll mean a new unit of work is spawned, - // and then whenever a unit of work finishes we relinquish a token, if we - // had one, to maybe get re-acquired later. - // - // Note that there's a race which may mean that we acquire more tokens than - // we originally anticipated. For example let's say we have 2 units of work. - // First we request one token from the helper thread and then we - // immediately spawn one unit of work with our ephemeral token after. We may - // then finish the first piece of work before the token is acquired, but we - // can continue to spawn the second piece of work with our ephemeral token. - // Before that work finishes, however, we may acquire a token. In that case - // we actually wastefully acquired the token, so we relinquish it back to - // the jobserver. - thread::spawn(move || { + return thread::spawn(move || { + let max_workers = ::num_cpus::get(); let mut worker_id_counter = 0; let mut free_worker_ids = Vec::new(); let mut get_worker_id = |free_worker_ids: &mut Vec| { @@ -1212,74 +1279,75 @@ fn start_executing_work(sess: &Session, } }; + // This is where we collect codegen units that have gone all the way + // through translation and LLVM. let mut compiled_modules = vec![]; let mut compiled_metadata_module = None; let mut compiled_allocator_module = None; + // This flag tracks whether all items have gone through translations let mut translation_done = false; + + // This is the queue of LLVM work items that still need processing. let mut work_items = Vec::new(); + + // This are the Jobserver Tokens we currently hold. Does not include + // the implicit Token the compiler process owns no matter what. 
let mut tokens = Vec::new(); - let mut trans_worker_state = TransWorkerState::Idle; + let mut main_thread_worker_state = MainThreadWorkerState::Idle; let mut running = 0; + // Run the message loop while there's still anything that needs message + // processing: while !translation_done || work_items.len() > 0 || running > 0 || - trans_worker_state != TransWorkerState::Idle { + main_thread_worker_state != MainThreadWorkerState::Idle { + // While there are still CGUs to be translated, the coordinator has + // to decide how to utilize the compiler processes implicit Token: + // For translating more CGU or for running them through LLVM. if !translation_done { - if trans_worker_state == TransWorkerState::Idle { - // Translation is not done yet, so there are two things the - // translation worker could do: - // - // (1) Translate another CGU - // (2) Run an already translated CGU through LLVM - // - // Option (2) makes sense if there's already enough work for - // all the other workers. In that case it's better to run - // a CGU through LLVM, so its resources can be freed. - // - // However, it's not trivial to determines what "enough work - // for all the other workers" means because: - // - // (1) We don't know how long the currently working workers - // will need to finish their work package, and - // (2) we don't know how many idle workers would be available - // because that is dynamically decided by the jobserver. - // - // TODO: Come up with a useful heuristic. - if work_items.len() <= 4 { + if main_thread_worker_state == MainThreadWorkerState::Idle { + if !queue_full_enough(work_items.len(), running, max_workers) { + // The queue is not full enough, translate more items: trans_worker_send.send(Message::TranslateItem).unwrap(); - trans_worker_state = TransWorkerState::Translating; + main_thread_worker_state = MainThreadWorkerState::Translating; } else { - let item = work_items.pop().unwrap(); + // The queue is full enough to not let the worker + // threads starve. 
Use the implicit Token to do some + // LLVM work too. + let (item, _) = work_items.pop().unwrap(); let cgcx = CodegenContext { - worker: TRANS_WORKER_ID, + worker: get_worker_id(&mut free_worker_ids), .. cgcx.clone() }; - trans_worker_state = TransWorkerState::LLVMing; + main_thread_worker_state = MainThreadWorkerState::LLVMing; spawn_work(cgcx, item); } } } else { - match trans_worker_state { - TransWorkerState::Idle => { - if let Some(item) = work_items.pop() { + // In this branch, we know that everything has been translated, + // so it's just a matter of determining whether the implicit + // Token is free to use for LLVM work. + match main_thread_worker_state { + MainThreadWorkerState::Idle => { + if let Some((item, _)) = work_items.pop() { let cgcx = CodegenContext { - worker: TRANS_WORKER_ID, + worker: get_worker_id(&mut free_worker_ids), .. cgcx.clone() }; - trans_worker_state = TransWorkerState::LLVMing; + main_thread_worker_state = MainThreadWorkerState::LLVMing; spawn_work(cgcx, item); } } - TransWorkerState::Translating => { + MainThreadWorkerState::Translating => { bug!("trans worker should not be translating after \ translation was already completed") } - TransWorkerState::LLVMing => { + MainThreadWorkerState::LLVMing => { // Already making good use of that token } } @@ -1288,11 +1356,10 @@ fn start_executing_work(sess: &Session, // Spin up what work we can, only doing this while we've got available // parallelism slots and work left to spawn. while work_items.len() > 0 && running < tokens.len() { - let item = work_items.pop().unwrap(); - let worker_id = get_worker_id(&mut free_worker_ids); + let (item, _) = work_items.pop().unwrap(); let cgcx = CodegenContext { - worker: worker_id, + worker: get_worker_id(&mut free_worker_ids), .. 
cgcx.clone() }; @@ -1310,6 +1377,15 @@ fn start_executing_work(sess: &Session, Message::Token(token) => { if let Ok(token) = token { tokens.push(token); + + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + // If the main thread token is used for LLVM work + // at the moment, we turn that thread into a regular + // LLVM worker thread, so the main thread is free + // to react to translation demand. + main_thread_worker_state = MainThreadWorkerState::Idle; + running += 1; + } } else { shared_emitter.fatal("failed to acquire jobserver token"); // Exit the coordinator thread @@ -1317,8 +1393,21 @@ fn start_executing_work(sess: &Session, } } - Message::TranslationDone { llvm_work_item, is_last } => { - work_items.insert(0, llvm_work_item); + Message::TranslationDone { llvm_work_item, cost, is_last } => { + // We keep the queue sorted by estimated processing cost, + // so that more expensive items are processed earlier. This + // is good for throughput as it gives the main thread more + // time to fill up the queue and it avoids scheduling + // expensive items to the end. + // Note, however, that this is not ideal for memory + // consumption, as LLVM module sizes are not evenly + // distributed. 
+ let insertion_index = + work_items.binary_search_by_key(&cost, |&(_, cost)| cost); + let insertion_index = match insertion_index { + Ok(idx) | Err(idx) => idx + }; + work_items.insert(insertion_index, (llvm_work_item, cost)); if is_last { // If this is the last, don't request a token because @@ -1329,8 +1418,9 @@ fn start_executing_work(sess: &Session, helper.request_token(); } - assert_eq!(trans_worker_state, TransWorkerState::Translating); - trans_worker_state = TransWorkerState::Idle; + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Translating); + main_thread_worker_state = MainThreadWorkerState::Idle; } // If a thread exits successfully then we drop a token associated @@ -1342,15 +1432,14 @@ fn start_executing_work(sess: &Session, // Note that if the thread failed that means it panicked, so we // abort immediately. Message::Done { result: Ok(compiled_module), worker_id } => { - if worker_id == TRANS_WORKER_ID { - assert_eq!(trans_worker_state, TransWorkerState::LLVMing); - trans_worker_state = TransWorkerState::Idle; + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; } else { - drop(tokens.pop()); running -= 1; - free_worker_ids.push(worker_id); } + free_worker_ids.push(worker_id); + match compiled_module.kind { ModuleKind::Regular => { compiled_modules.push(compiled_module); @@ -1381,7 +1470,16 @@ fn start_executing_work(sess: &Session, metadata_module: compiled_metadata_module.unwrap(), allocator_module: compiled_allocator_module, } - }) + }); + + // A heuristic that determines if we have enough LLVM WorkItems in the + // queue so that the main thread can do LLVM work instead of translation + fn queue_full_enough(items_in_queue: usize, + workers_running: usize, + max_workers: usize) -> bool { + // Tune me, plz. 
+ items_in_queue >= max_workers.saturating_sub(workers_running / 2) + } } pub const TRANS_WORKER_ID: usize = ::std::usize::MAX; @@ -1729,6 +1827,7 @@ impl OngoingCrateTranslation { pub fn submit_translated_module_to_llvm(&self, sess: &Session, mtrans: ModuleTranslation, + cost: u64, is_last: bool) { let module_config = match mtrans.kind { ModuleKind::Regular => self.regular_module_config.clone(sess), @@ -1742,6 +1841,7 @@ impl OngoingCrateTranslation { drop(self.coordinator_send.send(Message::TranslationDone { llvm_work_item, + cost, is_last })); } @@ -1752,7 +1852,10 @@ impl OngoingCrateTranslation { is_last: bool) { self.wait_for_signal_to_translate_item(); self.check_for_errors(sess); - self.submit_translated_module_to_llvm(sess, mtrans, is_last); + + // These are generally cheap and won't through off scheduling. + let cost = 0; + self.submit_translated_module_to_llvm(sess, mtrans, cost, is_last); } pub fn check_for_errors(&self, sess: &Session) { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 2d1f43aff571..e4a763455284 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -80,6 +80,7 @@ use libc::c_uint; use std::ffi::{CStr, CString}; use std::str; use std::sync::Arc; +use std::time::Instant; use std::i32; use syntax_pos::Span; use syntax::attr; @@ -1082,10 +1083,22 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mut all_stats = Stats::default(); let mut module_dispositions = tcx.sess.opts.incremental.as_ref().map(|_| Vec::new()); + // We sort the codegen units by size. This way we can schedule work for LLVM + // a bit more efficiently. Note that "size" is defined rather crudely at the + // moment as it is just the number of TransItems in the CGU, not taking into + // account the size of each TransItem. 
+ let codegen_units = { + let mut codegen_units = codegen_units; + codegen_units.sort_by_key(|cgu| -(cgu.items().len() as isize)); + codegen_units + }; + for (cgu_index, cgu) in codegen_units.into_iter().enumerate() { ongoing_translation.wait_for_signal_to_translate_item(); ongoing_translation.check_for_errors(tcx.sess); + let start_time = Instant::now(); + let module = { let _timing_guard = time_graph .as_ref() @@ -1108,10 +1121,18 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, module }; + let time_to_translate = Instant::now().duration_since(start_time); + + // We assume that the cost to run LLVM on a CGU is proportional to + // the time we needed for translating it. + let cost = time_to_translate.as_secs() * 1_000_000_000 + + time_to_translate.subsec_nanos() as u64; + let is_last_cgu = (cgu_index + 1) == codegen_unit_count; ongoing_translation.submit_translated_module_to_llvm(tcx.sess, module, + cost, is_last_cgu); ongoing_translation.check_for_errors(tcx.sess); } diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 83835cb794ab..5a4a5b95cf90 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -39,7 +39,6 @@ use syntax_pos::symbol::Symbol; use std::sync::Arc; extern crate flate2; -extern crate crossbeam; extern crate libc; extern crate owning_ref; #[macro_use] extern crate rustc; @@ -55,6 +54,7 @@ extern crate rustc_const_math; extern crate rustc_bitflags; extern crate rustc_demangle; extern crate jobserver; +extern crate num_cpus; #[macro_use] extern crate log; #[macro_use] extern crate syntax; From a9a0ea921b20f64f0253235704889a2950f72535 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Mon, 31 Jul 2017 14:51:47 +0200 Subject: [PATCH 103/213] async-llvm(25): Restore -Ztime-passes output for trans and LLVM. 
--- src/librustc/util/common.rs | 32 +++++++++++++++++++---- src/librustc_trans/back/write.rs | 45 ++++++++++++++++++++++++++++---- src/librustc_trans/base.rs | 14 ++++++++-- 3 files changed, 79 insertions(+), 12 deletions(-) diff --git a/src/librustc/util/common.rs b/src/librustc/util/common.rs index 17564671a1e3..244b7f359688 100644 --- a/src/librustc/util/common.rs +++ b/src/librustc/util/common.rs @@ -57,6 +57,32 @@ pub fn time(do_it: bool, what: &str, f: F) -> T where let rv = f(); let dur = start.elapsed(); + print_time_passes_entry_internal(what, dur); + + TIME_DEPTH.with(|slot| slot.set(old)); + + rv +} + +pub fn print_time_passes_entry(do_it: bool, what: &str, dur: Duration) { + if !do_it { + return + } + + let old = TIME_DEPTH.with(|slot| { + let r = slot.get(); + slot.set(r + 1); + r + }); + + print_time_passes_entry_internal(what, dur); + + TIME_DEPTH.with(|slot| slot.set(old)); +} + +fn print_time_passes_entry_internal(what: &str, dur: Duration) { + let indentation = TIME_DEPTH.with(|slot| slot.get()); + let mem_string = match get_resident() { Some(n) => { let mb = n as f64 / 1_000_000.0; @@ -65,14 +91,10 @@ pub fn time(do_it: bool, what: &str, f: F) -> T where None => "".to_owned(), }; println!("{}time: {}{}\t{}", - repeat(" ").take(old).collect::(), + repeat(" ").take(indentation).collect::(), duration_to_secs_str(dur), mem_string, what); - - TIME_DEPTH.with(|slot| slot.set(old)); - - rv } // Hack up our own formatting for the duration to make it easier for scripts diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 4e68fa8ce40c..b3b155c88100 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -23,7 +23,7 @@ use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef}; use llvm::SMDiagnosticRef; use {CrateTranslation, ModuleSource, ModuleTranslation, CompiledModule, ModuleKind}; use rustc::hir::def_id::CrateNum; -use rustc::util::common::{time, time_depth, 
set_time_depth, path2cstr}; +use rustc::util::common::{time, time_depth, set_time_depth, path2cstr, print_time_passes_entry}; use rustc::util::fs::{link_or_copy, rename_or_copy_remove}; use errors::{self, Handler, Level, DiagnosticBuilder, FatalError}; use errors::emitter::{Emitter}; @@ -44,6 +44,7 @@ use std::str; use std::sync::Arc; use std::sync::mpsc::{channel, Sender, Receiver}; use std::slice; +use std::time::Instant; use std::thread; use libc::{c_uint, c_void, c_char, size_t}; @@ -498,9 +499,9 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, diag_handler.abort_if_errors(); // Finally, run the actual optimization passes - time(config.time_passes, &format!("llvm function passes [{}]", cgcx.worker), || + time(config.time_passes, &format!("llvm function passes [{}]", module_name.unwrap()), || llvm::LLVMRustRunFunctionPassManager(fpm, llmod)); - time(config.time_passes, &format!("llvm module passes [{}]", cgcx.worker), || + time(config.time_passes, &format!("llvm module passes [{}]", module_name.unwrap()), || llvm::LLVMRunPassManager(mpm, llmod)); // Deallocate managers that we're now done with @@ -563,7 +564,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, llvm::LLVMWriteBitcodeToFile(llmod, bc_out_c.as_ptr()); } - time(config.time_passes, &format!("codegen passes [{}]", cgcx.worker), + time(config.time_passes, &format!("codegen passes [{}]", module_name.unwrap()), || -> Result<(), FatalError> { if config.emit_ir { let out = output_names.temp_path(OutputType::LlvmAssembly, module_name); @@ -756,6 +757,11 @@ pub fn start_async_translation(sess: &Session, metadata_config.set_flags(sess, no_builtins); allocator_config.set_flags(sess, no_builtins); + // Exclude metadata and allocator modules from time_passes output, since + // they throw off the "LLVM passes" measurement. 
+ metadata_config.time_passes = false; + allocator_config.time_passes = false; + let client = sess.jobserver_from_env.clone().unwrap_or_else(|| { // Pick a "reasonable maximum" if we don't otherwise have a jobserver in // our environment, capping out at 32 so we don't take everything down @@ -1266,6 +1272,9 @@ fn start_executing_work(sess: &Session, // manner we can ensure that the maximum number of parallel workers is // capped at any one point in time. return thread::spawn(move || { + // We pretend to be within the top-level LLVM time-passes task here: + set_time_depth(1); + let max_workers = ::num_cpus::get(); let mut worker_id_counter = 0; let mut free_worker_ids = Vec::new(); @@ -1298,6 +1307,8 @@ fn start_executing_work(sess: &Session, let mut main_thread_worker_state = MainThreadWorkerState::Idle; let mut running = 0; + let mut llvm_start_time = None; + // Run the message loop while there's still anything that needs message // processing: while !translation_done || @@ -1323,6 +1334,7 @@ fn start_executing_work(sess: &Session, worker: get_worker_id(&mut free_worker_ids), .. cgcx.clone() }; + maybe_start_llvm_timer(&item, &mut llvm_start_time); main_thread_worker_state = MainThreadWorkerState::LLVMing; spawn_work(cgcx, item); } @@ -1338,7 +1350,7 @@ fn start_executing_work(sess: &Session, worker: get_worker_id(&mut free_worker_ids), .. cgcx.clone() }; - + maybe_start_llvm_timer(&item, &mut llvm_start_time); main_thread_worker_state = MainThreadWorkerState::LLVMing; spawn_work(cgcx, item); } @@ -1358,6 +1370,8 @@ fn start_executing_work(sess: &Session, while work_items.len() > 0 && running < tokens.len() { let (item, _) = work_items.pop().unwrap(); + maybe_start_llvm_timer(&item, &mut llvm_start_time); + let cgcx = CodegenContext { worker: get_worker_id(&mut free_worker_ids), .. 
cgcx.clone() @@ -1465,6 +1479,16 @@ fn start_executing_work(sess: &Session, } } + if let Some(llvm_start_time) = llvm_start_time { + let total_llvm_time = Instant::now().duration_since(llvm_start_time); + // This is the top-level timing for all of LLVM, set the time-depth + // to zero. + set_time_depth(0); + print_time_passes_entry(cgcx.time_passes, + "LLVM passes", + total_llvm_time); + } + CompiledModules { modules: compiled_modules, metadata_module: compiled_metadata_module.unwrap(), @@ -1480,6 +1504,17 @@ fn start_executing_work(sess: &Session, // Tune me, plz. items_in_queue >= max_workers.saturating_sub(workers_running / 2) } + + fn maybe_start_llvm_timer(work_item: &WorkItem, + llvm_start_time: &mut Option) { + // We keep track of the -Ztime-passes output manually, + // since the closure-based interface does not fit well here. + if work_item.config.time_passes { + if llvm_start_time.is_none() { + *llvm_start_time = Some(Instant::now()); + } + } + } } pub const TRANS_WORKER_ID: usize = ::std::usize::MAX; diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index e4a763455284..70283ea55c5c 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -43,7 +43,7 @@ use rustc::ty::{self, Ty, TyCtxt}; use rustc::dep_graph::AssertDepGraphSafe; use rustc::middle::cstore::LinkMeta; use rustc::hir::map as hir_map; -use rustc::util::common::time; +use rustc::util::common::{time, print_time_passes_entry}; use rustc::session::config::{self, NoDebugInfo, OutputFilenames, OutputType}; use rustc::session::Session; use rustc_incremental::{self, IncrementalHashesMap}; @@ -80,7 +80,7 @@ use libc::c_uint; use std::ffi::{CStr, CString}; use std::str; use std::sync::Arc; -use std::time::Instant; +use std::time::{Instant, Duration}; use std::i32; use syntax_pos::Span; use syntax::attr; @@ -1093,6 +1093,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, codegen_units }; + let mut total_trans_time = Duration::new(0, 0); + for (cgu_index, 
cgu) in codegen_units.into_iter().enumerate() { ongoing_translation.wait_for_signal_to_translate_item(); ongoing_translation.check_for_errors(tcx.sess); @@ -1128,6 +1130,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let cost = time_to_translate.as_secs() * 1_000_000_000 + time_to_translate.subsec_nanos() as u64; + total_trans_time += time_to_translate; + let is_last_cgu = (cgu_index + 1) == codegen_unit_count; ongoing_translation.submit_translated_module_to_llvm(tcx.sess, @@ -1137,6 +1141,12 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ongoing_translation.check_for_errors(tcx.sess); } + // Since the main thread is sometimes blocked during trans, we keep track + // -Ztime-passes output manually. + print_time_passes_entry(tcx.sess.time_passes(), + "translate to LLVM IR", + total_trans_time); + if let Some(module_dispositions) = module_dispositions { assert_module_sources::assert_module_sources(tcx, &module_dispositions); } From cacc31f8a348a97da8681c0e55dd106818b7c8cd Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Mon, 31 Jul 2017 15:41:41 +0200 Subject: [PATCH 104/213] async-llvm(26): Print error when failing to acquire Jobserver token. --- src/librustc_trans/back/write.rs | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index b3b155c88100..85860f0e33a3 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -1389,21 +1389,25 @@ fn start_executing_work(sess: &Session, // this to spawn a new unit of work, or it may get dropped // immediately if we have no more work to spawn. 
Message::Token(token) => { - if let Ok(token) = token { - tokens.push(token); + match token { + Ok(token) => { + tokens.push(token); - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - // If the main thread token is used for LLVM work - // at the moment, we turn that thread into a regular - // LLVM worker thread, so the main thread is free - // to react to translation demand. - main_thread_worker_state = MainThreadWorkerState::Idle; - running += 1; + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + // If the main thread token is used for LLVM work + // at the moment, we turn that thread into a regular + // LLVM worker thread, so the main thread is free + // to react to translation demand. + main_thread_worker_state = MainThreadWorkerState::Idle; + running += 1; + } + } + Err(e) => { + let msg = &format!("failed to acquire jobserver token: {}", e); + shared_emitter.fatal(msg); + // Exit the coordinator thread + panic!() } - } else { - shared_emitter.fatal("failed to acquire jobserver token"); - // Exit the coordinator thread - panic!() } } From 1057a728f5a39458fae34295222197e68d4db8c9 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Mon, 31 Jul 2017 15:54:16 +0300 Subject: [PATCH 105/213] fix `-Z treat-err-as-bug` --- src/librustc_errors/diagnostic_builder.rs | 12 ++++++------ src/librustc_errors/lib.rs | 5 +---- src/test/run-make/treat-err-as-bug/Makefile | 5 +++++ src/test/run-make/treat-err-as-bug/err.rs | 13 +++++++++++++ 4 files changed, 25 insertions(+), 10 deletions(-) create mode 100644 src/test/run-make/treat-err-as-bug/Makefile create mode 100644 src/test/run-make/treat-err-as-bug/err.rs diff --git a/src/librustc_errors/diagnostic_builder.rs b/src/librustc_errors/diagnostic_builder.rs index 6f6470089d77..5d7c5e2829a3 100644 --- a/src/librustc_errors/diagnostic_builder.rs +++ b/src/librustc_errors/diagnostic_builder.rs @@ -82,26 +82,27 @@ impl<'a> DiagnosticBuilder<'a> { return; } - match self.level { + let is_error = 
match self.level { Level::Bug | Level::Fatal | Level::PhaseFatal | Level::Error => { - self.handler.bump_err_count(); + true } Level::Warning | Level::Note | Level::Help | Level::Cancelled => { + false } - } + }; self.handler.emitter.borrow_mut().emit(&self); self.cancel(); - if self.level == Level::Error { - self.handler.panic_if_treat_err_as_bug(); + if is_error { + self.handler.bump_err_count(); } // if self.is_fatal() { @@ -210,4 +211,3 @@ impl<'a> Drop for DiagnosticBuilder<'a> { } } } - diff --git a/src/librustc_errors/lib.rs b/src/librustc_errors/lib.rs index e873137444d2..159d2c7a2df1 100644 --- a/src/librustc_errors/lib.rs +++ b/src/librustc_errors/lib.rs @@ -399,7 +399,6 @@ impl Handler { pub fn span_fatal>(&self, sp: S, msg: &str) -> FatalError { self.emit(&sp.into(), msg, Fatal); - self.panic_if_treat_err_as_bug(); FatalError } pub fn span_fatal_with_code>(&self, @@ -408,12 +407,10 @@ impl Handler { code: &str) -> FatalError { self.emit_with_code(&sp.into(), msg, code, Fatal); - self.panic_if_treat_err_as_bug(); FatalError } pub fn span_err>(&self, sp: S, msg: &str) { self.emit(&sp.into(), msg, Error); - self.panic_if_treat_err_as_bug(); } pub fn mut_span_err<'a, S: Into>(&'a self, sp: S, @@ -425,7 +422,6 @@ impl Handler { } pub fn span_err_with_code>(&self, sp: S, msg: &str, code: &str) { self.emit_with_code(&sp.into(), msg, code, Error); - self.panic_if_treat_err_as_bug(); } pub fn span_warn>(&self, sp: S, msg: &str) { self.emit(&sp.into(), msg, Warning); @@ -494,6 +490,7 @@ impl Handler { } pub fn bump_err_count(&self) { + self.panic_if_treat_err_as_bug(); self.err_count.set(self.err_count.get() + 1); } diff --git a/src/test/run-make/treat-err-as-bug/Makefile b/src/test/run-make/treat-err-as-bug/Makefile new file mode 100644 index 000000000000..a8fa2d4e0f82 --- /dev/null +++ b/src/test/run-make/treat-err-as-bug/Makefile @@ -0,0 +1,5 @@ +-include ../tools.mk + +all: + $(RUSTC) err.rs -Z treat-err-as-bug 2>&1 \ + | grep -q "panicked at 'encountered 
error with .-Z treat_err_as_bug'" diff --git a/src/test/run-make/treat-err-as-bug/err.rs b/src/test/run-make/treat-err-as-bug/err.rs new file mode 100644 index 000000000000..078495663acc --- /dev/null +++ b/src/test/run-make/treat-err-as-bug/err.rs @@ -0,0 +1,13 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![crate_type="rlib"] + +pub static C: u32 = 0-1; From 93db1f9923d23c905c5cd8e7c50c5927c7c72a24 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Mon, 31 Jul 2017 17:42:42 +0300 Subject: [PATCH 106/213] trans::mir::constant - fix assignment error recovery We used to not store anything when the RHS of an assignment returned an error, which caused ICEs downstream. Fixes #43197. --- src/librustc_trans/mir/constant.rs | 62 +++++++++++++---------- src/test/ui/const-eval/issue-43197.rs | 21 ++++++++ src/test/ui/const-eval/issue-43197.stderr | 28 ++++++++++ 3 files changed, 84 insertions(+), 27 deletions(-) create mode 100644 src/test/ui/const-eval/issue-43197.rs create mode 100644 src/test/ui/const-eval/issue-43197.stderr diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 98e774a29877..393fa9c0c8e0 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -222,15 +222,24 @@ struct MirConstContext<'a, 'tcx: 'a> { substs: &'tcx Substs<'tcx>, /// Values of locals in a constant or const fn. 
- locals: IndexVec>> + locals: IndexVec, ConstEvalErr<'tcx>>>> } +fn add_err<'tcx, U, V>(failure: &mut Result>, + value: &Result>) +{ + if let &Err(ref err) = value { + if failure.is_ok() { + *failure = Err(err.clone()); + } + } +} impl<'a, 'tcx> MirConstContext<'a, 'tcx> { fn new(ccx: &'a CrateContext<'a, 'tcx>, mir: &'a mir::Mir<'tcx>, substs: &'tcx Substs<'tcx>, - args: IndexVec>) + args: IndexVec, ConstEvalErr<'tcx>>>) -> MirConstContext<'a, 'tcx> { let mut context = MirConstContext { ccx: ccx, @@ -249,7 +258,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { fn trans_def(ccx: &'a CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, - args: IndexVec>) + args: IndexVec, ConstEvalErr<'tcx>>>) -> Result, ConstEvalErr<'tcx>> { let instance = monomorphize::resolve(ccx.shared(), def_id, substs); let mir = ccx.tcx().instance_mir(instance.def); @@ -278,10 +287,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::StatementKind::Assign(ref dest, ref rvalue) => { let ty = dest.ty(self.mir, tcx); let ty = self.monomorphize(&ty).to_ty(tcx); - match self.const_rvalue(rvalue, ty, span) { - Ok(value) => self.store(dest, value, span), - Err(err) => if failure.is_ok() { failure = Err(err); } - } + let value = self.const_rvalue(rvalue, ty, span); + add_err(&mut failure, &value); + self.store(dest, value, span); } mir::StatementKind::StorageLive(_) | mir::StatementKind::StorageDead(_) | @@ -301,9 +309,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::TerminatorKind::Goto { target } => target, mir::TerminatorKind::Return => { failure?; - return Ok(self.locals[mir::RETURN_POINTER].unwrap_or_else(|| { + return self.locals[mir::RETURN_POINTER].clone().unwrap_or_else(|| { span_bug!(span, "no returned value in constant"); - })); + }); } mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. 
} => { @@ -342,33 +350,30 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let mut arg_vals = IndexVec::with_capacity(args.len()); for arg in args { - match self.const_operand(arg, span) { - Ok(arg) => { arg_vals.push(arg); }, - Err(err) => if failure.is_ok() { failure = Err(err); } - } + let arg_val = self.const_operand(arg, span); + add_err(&mut failure, &arg_val); + arg_vals.push(arg_val); } if let Some((ref dest, target)) = *destination { - if fn_ty.fn_sig(tcx).abi() == Abi::RustIntrinsic { - let value = match &tcx.item_name(def_id).as_str()[..] { + let result = if fn_ty.fn_sig(tcx).abi() == Abi::RustIntrinsic { + match &tcx.item_name(def_id).as_str()[..] { "size_of" => { let llval = C_uint(self.ccx, self.ccx.size_of(substs.type_at(0))); - Const::new(llval, tcx.types.usize) + Ok(Const::new(llval, tcx.types.usize)) } "min_align_of" => { let llval = C_uint(self.ccx, self.ccx.align_of(substs.type_at(0))); - Const::new(llval, tcx.types.usize) + Ok(Const::new(llval, tcx.types.usize)) } _ => span_bug!(span, "{:?} in constant", terminator.kind) - }; - self.store(dest, value, span); - } else { - match MirConstContext::trans_def(self.ccx, def_id, substs, arg_vals) { - Ok(value) => self.store(dest, value, span), - Err(err) => if failure.is_ok() { failure = Err(err); } } - } + } else { + MirConstContext::trans_def(self.ccx, def_id, substs, arg_vals) + }; + add_err(&mut failure, &result); + self.store(dest, result, span); target } else { span_bug!(span, "diverging {:?} in constant", terminator.kind); @@ -379,7 +384,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } } - fn store(&mut self, dest: &mir::Lvalue<'tcx>, value: Const<'tcx>, span: Span) { + fn store(&mut self, + dest: &mir::Lvalue<'tcx>, + value: Result, ConstEvalErr<'tcx>>, + span: Span) { if let mir::Lvalue::Local(index) = *dest { self.locals[index] = Some(value); } else { @@ -392,9 +400,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let tcx = self.ccx.tcx(); if let mir::Lvalue::Local(index) = *lvalue { - return 
Ok(self.locals[index].unwrap_or_else(|| { + return self.locals[index].clone().unwrap_or_else(|| { span_bug!(span, "{:?} not initialized", lvalue) - }).as_lvalue()); + }).map(|v| v.as_lvalue()); } let lvalue = match *lvalue { diff --git a/src/test/ui/const-eval/issue-43197.rs b/src/test/ui/const-eval/issue-43197.rs new file mode 100644 index 000000000000..1d4ded6e7123 --- /dev/null +++ b/src/test/ui/const-eval/issue-43197.rs @@ -0,0 +1,21 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(const_fn)] + +const fn foo(x: u32) -> u32 { + x +} + +fn main() { + const X: u32 = 0-1; + const Y: u32 = foo(0-1); + println!("{} {}", X, Y); +} diff --git a/src/test/ui/const-eval/issue-43197.stderr b/src/test/ui/const-eval/issue-43197.stderr new file mode 100644 index 000000000000..5ff80060eac7 --- /dev/null +++ b/src/test/ui/const-eval/issue-43197.stderr @@ -0,0 +1,28 @@ +warning: constant evaluation error: attempt to subtract with overflow. This will become a HARD ERROR in the future + --> $DIR/issue-43197.rs:18:20 + | +18 | const X: u32 = 0-1; + | ^^^ + | + = note: #[warn(const_err)] on by default + +warning: constant evaluation error: attempt to subtract with overflow. 
This will become a HARD ERROR in the future + --> $DIR/issue-43197.rs:19:20 + | +19 | const Y: u32 = foo(0-1); + | ^^^^^^^^ + +error[E0080]: constant evaluation error + --> $DIR/issue-43197.rs:18:20 + | +18 | const X: u32 = 0-1; + | ^^^ attempt to subtract with overflow + +error[E0080]: constant evaluation error + --> $DIR/issue-43197.rs:19:24 + | +19 | const Y: u32 = foo(0-1); + | ^^^ attempt to subtract with overflow + +error: aborting due to 2 previous errors + From 8f67f1efaf792a0c3ef629e1e62e53eba7365a1c Mon Sep 17 00:00:00 2001 From: Niko Matsakis Date: Mon, 31 Jul 2017 18:13:49 +0300 Subject: [PATCH 107/213] add comments from arielb1 --- src/librustc_typeck/check/_match.rs | 43 +++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index 01d2986a53ca..eaff8e7b8ace 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -410,6 +410,49 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // is problematic as the HIR is being scraped, but ref bindings may be // implicit after #42640. We need to make sure that pat_adjustments // (once introduced) is populated by the time we get here. + // + // arielb1 [writes here in this comment thread][c] that there + // is certainly *some* potential danger, e.g. for an example + // like: + // + // [c]: https://github.com/rust-lang/rust/pull/43399#discussion_r130223956 + // + // ``` + // let Foo(x) = f()[0]; + // ``` + // + // Then if the pattern matches by reference, we want to match + // `f()[0]` as a lexpr, so we can't allow it to be + // coerced. But if the pattern matches by value, `f()[0]` is + // still syntactically a lexpr, but we *do* want to allow + // coercions. 
+ // + // However, *likely* we are ok with allowing coercions to + // happen if there are no explicit ref mut patterns - all + // implicit ref mut patterns must occur behind a reference, so + // they will have the "correct" variance and lifetime. + // + // This does mean that the following pattern would be legal: + // + // ``` + // struct Foo(Bar); + // struct Bar(u32); + // impl Deref for Foo { + // type Target = Bar; + // fn deref(&self) -> &Bar { &self.0 } + // } + // impl DerefMut for Foo { + // fn deref_mut(&mut self) -> &mut Bar { &mut self.0 } + // } + // fn foo(x: &mut Foo) { + // { + // let Bar(z): &mut Bar = x; + // *z = 42; + // } + // assert_eq!(foo.0.0, 42); + // } + // ``` + let contains_ref_bindings = arms.iter() .filter_map(|a| a.contains_explicit_ref_binding()) .max_by_key(|m| match *m { From b1e043e9e965392b9c018c328ec580e7bd78af24 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Mon, 31 Jul 2017 18:51:39 +0200 Subject: [PATCH 108/213] async-llvm(27): Move #[rustc_error] check to an earlier point in order to restore some test expections. --- src/librustc_trans/base.rs | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 70283ea55c5c..14c73de64bc7 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -650,9 +650,23 @@ pub fn set_link_section(ccx: &CrateContext, } } +// check for the #[rustc_error] annotation, which forces an +// error in trans. This is used to write compile-fail tests +// that actually test that compilation succeeds without +// reporting an error. 
+fn check_for_rustc_errors_attr(tcx: TyCtxt) { + if let Some((id, span)) = *tcx.sess.entry_fn.borrow() { + let main_def_id = tcx.hir.local_def_id(id); + + if tcx.has_attr(main_def_id, "rustc_error") { + tcx.sess.span_fatal(span, "compilation successful"); + } + } +} + /// Create the `main` function which will initialise the rust runtime and call /// users main function. -pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { +fn maybe_create_entry_wrapper(ccx: &CrateContext) { let (main_def_id, span) = match *ccx.sess().entry_fn.borrow() { Some((id, span)) => { (ccx.tcx().hir.local_def_id(id), span) @@ -660,14 +674,6 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { None => return, }; - // check for the #[rustc_error] annotation, which forces an - // error in trans. This is used to write compile-fail tests - // that actually test that compilation succeeds without - // reporting an error. - if ccx.tcx().has_attr(main_def_id, "rustc_error") { - ccx.tcx().sess.span_fatal(span, "compilation successful"); - } - let instance = Instance::mono(ccx.tcx(), main_def_id); if !ccx.codegen_unit().contains_item(&TransItem::Fn(instance)) { @@ -928,6 +934,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, incremental_hashes_map: IncrementalHashesMap, output_filenames: &OutputFilenames) -> OngoingCrateTranslation { + check_for_rustc_errors_attr(tcx); + // Be careful with this krate: obviously it gives access to the // entire contents of the krate. 
So if you push any subtasks of // `TransCrate`, you need to be careful to register "reads" of the From 6ff7c8fa047f98bfc6f1d7c9abdd64bc557add32 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 31 Jul 2017 13:20:37 -0700 Subject: [PATCH 109/213] more documentation --- src/librustc/mir/mod.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 3ee86dbdc846..f7ef542544cc 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -825,7 +825,9 @@ pub enum StatementKind<'tcx> { inputs: Vec> }, - /// Assert the given lvalues to be valid inhabitants of their type. + /// Assert the given lvalues to be valid inhabitants of their type. These statements are + /// currently only interpreted by miri and only generated when "-Z mir-emit-validate" is passed. + /// See for more details. Validate(ValidationOp, Vec>>), /// Mark one terminating point of an extent (i.e. static region). @@ -836,10 +838,19 @@ pub enum StatementKind<'tcx> { Nop, } +/// The `ValidationOp` describes what happens with each of the operands of a +/// `Validate` statement. #[derive(Copy, Clone, RustcEncodable, RustcDecodable, PartialEq, Eq)] pub enum ValidationOp { + /// Recursively traverse the lvalue following the type and validate that all type + /// invariants are maintained. Furthermore, acquire exclusive/read-only access to the + /// memory reachable from the lvalue. Acquire, + /// Recursive traverse the *mutable* part of the type and relinquish all exclusive + /// access. Release, + /// Recursive traverse the *mutable* part of the type and relinquish all exclusive + /// access *until* the given region ends. Then, access will be recovered. 
Suspend(CodeExtent), } From 6135461f9a44732a61e2422af20030ebd31486e8 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 31 Jul 2017 13:21:10 -0700 Subject: [PATCH 110/213] CleanEndRegions: use default impl where possible --- src/librustc_mir/transform/clean_end_regions.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/librustc_mir/transform/clean_end_regions.rs b/src/librustc_mir/transform/clean_end_regions.rs index d7ec58384a46..f06b88551d11 100644 --- a/src/librustc_mir/transform/clean_end_regions.rs +++ b/src/librustc_mir/transform/clean_end_regions.rs @@ -65,13 +65,6 @@ impl<'tcx> Visitor<'tcx> for GatherBorrowedRegions { self.super_rvalue(rvalue, location); } - fn visit_statement(&mut self, - block: BasicBlock, - statement: &Statement<'tcx>, - location: Location) { - self.super_statement(block, statement, location); - } - fn visit_ty(&mut self, ty: &Ty<'tcx>, _: Lookup) { // Gather regions that occur in types for re in ty.walk().flat_map(|t| t.regions()) { From 1447daa01ddc6536724eb4f3e10972404da0cd56 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Mon, 31 Jul 2017 16:00:12 +0300 Subject: [PATCH 111/213] remove the span field from `diverge_cleanup` --- src/librustc_mir/build/block.rs | 2 +- src/librustc_mir/build/expr/into.rs | 2 +- src/librustc_mir/build/matches/test.rs | 2 +- src/librustc_mir/build/scope.rs | 27 +++++++++++++++----------- 4 files changed, 19 insertions(+), 14 deletions(-) diff --git a/src/librustc_mir/build/block.rs b/src/librustc_mir/build/block.rs index 865174aa272e..4583d80b83dd 100644 --- a/src/librustc_mir/build/block.rs +++ b/src/librustc_mir/build/block.rs @@ -86,7 +86,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let tcx = this.hir.tcx(); // Enter the remainder scope, i.e. the bindings' destruction scope. - this.push_scope(remainder_scope); + this.push_scope((remainder_scope, source_info)); let_extent_stack.push(remainder_scope); // Declare the bindings, which may create a visibility scope. 
diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index 326c1df69ebe..7ae5d6b0ec19 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -237,7 +237,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { .collect(); let success = this.cfg.start_new_block(); - let cleanup = this.diverge_cleanup(expr_span); + let cleanup = this.diverge_cleanup(); this.cfg.terminate(block, source_info, TerminatorKind::Call { func: fun, args: args, diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index f4d43e041ae8..28386fa598ce 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -306,7 +306,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let bool_ty = self.hir.bool_ty(); let eq_result = self.temp(bool_ty, test.span); let eq_block = self.cfg.start_new_block(); - let cleanup = self.diverge_cleanup(test.span); + let cleanup = self.diverge_cleanup(); self.cfg.terminate(block, source_info, TerminatorKind::Call { func: Operand::Constant(box Constant { span: test.span, diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 2244ffde3c9d..2b52198c2506 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -107,6 +107,9 @@ pub struct Scope<'tcx> { /// the extent of this scope within source code. extent: CodeExtent, + /// the span of that extent + extent_span: Span, + /// Whether there's anything to do for the cleanup path, that is, /// when unwinding through this scope. 
This includes destructors, /// but not StorageDead statements, which don't get emitted at all @@ -116,7 +119,7 @@ pub struct Scope<'tcx> { /// * pollutting the cleanup MIR with StorageDead creates /// landing pads even though there's no actual destructors /// * freeing up stack space has no effect during unwinding - pub(super) needs_cleanup: bool, + needs_cleanup: bool, /// set of lvalues to drop when exiting this scope. This starts /// out empty but grows as variables are declared during the @@ -282,7 +285,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd { debug!("in_opt_scope(opt_extent={:?}, block={:?})", opt_extent, block); - if let Some(extent) = opt_extent { self.push_scope(extent.0); } + if let Some(extent) = opt_extent { self.push_scope(extent); } let rv = unpack!(block = f(self)); if let Some(extent) = opt_extent { unpack!(block = self.pop_scope(extent, block)); @@ -301,7 +304,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd { debug!("in_scope(extent={:?}, block={:?})", extent, block); - self.push_scope(extent.0); + self.push_scope(extent); let rv = unpack!(block = f(self)); unpack!(block = self.pop_scope(extent, block)); debug!("in_scope: exiting extent={:?} block={:?}", extent, block); @@ -312,12 +315,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// scope and call `pop_scope` afterwards. Note that these two /// calls must be paired; using `in_scope` as a convenience /// wrapper maybe preferable. 
- pub fn push_scope(&mut self, extent: CodeExtent) { + pub fn push_scope(&mut self, extent: (CodeExtent, SourceInfo)) { debug!("push_scope({:?})", extent); let vis_scope = self.visibility_scope; self.scopes.push(Scope { visibility_scope: vis_scope, - extent: extent, + extent: extent.0, + extent_span: extent.1.span, needs_cleanup: false, drops: vec![], free: None, @@ -335,7 +339,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { debug!("pop_scope({:?}, {:?})", extent, block); // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup // to make sure all the `cached_block`s are filled in. - self.diverge_cleanup(extent.1.span); + self.diverge_cleanup(); let scope = self.scopes.pop().unwrap(); assert_eq!(scope.extent, extent.0); unpack!(block = build_scope_drops(&mut self.cfg, @@ -618,7 +622,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// This path terminates in Resume. Returns the start of the path. /// See module comment for more details. None indicates there’s no /// cleanup to do at this point. 
- pub fn diverge_cleanup(&mut self, span: Span) -> Option { + pub fn diverge_cleanup(&mut self) -> Option { if !self.scopes.iter().any(|scope| scope.needs_cleanup) { return None; } @@ -652,7 +656,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }; for scope in scopes.iter_mut() { - target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, span, scope, target); + target = build_diverge_scope( + hir.tcx(), cfg, &unit_temp, scope.extent_span, scope, target); } Some(target) } @@ -668,7 +673,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } let source_info = self.source_info(span); let next_target = self.cfg.start_new_block(); - let diverge_target = self.diverge_cleanup(span); + let diverge_target = self.diverge_cleanup(); self.cfg.terminate(block, source_info, TerminatorKind::Drop { location: location, @@ -686,7 +691,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { value: Operand<'tcx>) -> BlockAnd<()> { let source_info = self.source_info(span); let next_target = self.cfg.start_new_block(); - let diverge_target = self.diverge_cleanup(span); + let diverge_target = self.diverge_cleanup(); self.cfg.terminate(block, source_info, TerminatorKind::DropAndReplace { location: location, @@ -709,7 +714,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let source_info = self.source_info(span); let success_block = self.cfg.start_new_block(); - let cleanup = self.diverge_cleanup(span); + let cleanup = self.diverge_cleanup(); self.cfg.terminate(block, source_info, TerminatorKind::Assert { From 85c102757a744956989e5217484a6f650eed3146 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Mon, 31 Jul 2017 23:25:27 +0300 Subject: [PATCH 112/213] rustc_mir: don't build unused unwind cleanup blocks The unused blocks are removed by SimplifyCfg, but they can cause a significant performance slowdown before they are removed. 
--- src/librustc_mir/build/scope.rs | 89 ++++++++++++++++++---------- src/test/mir-opt/basic_assignment.rs | 26 ++++---- src/test/mir-opt/end_region_4.rs | 38 ++++++------ src/test/mir-opt/end_region_5.rs | 27 ++++----- src/test/mir-opt/end_region_6.rs | 28 ++++----- src/test/mir-opt/end_region_7.rs | 36 +++++------ src/test/mir-opt/end_region_8.rs | 75 ++++++++++++----------- src/test/mir-opt/issue-41110.rs | 15 +++-- 8 files changed, 181 insertions(+), 153 deletions(-) diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 2b52198c2506..bf39e52bd1b2 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -200,6 +200,15 @@ pub struct BreakableScope<'tcx> { pub break_destination: Lvalue<'tcx>, } +impl DropKind { + fn may_panic(&self) -> bool { + match *self { + DropKind::Value { .. } => true, + DropKind::Storage => false + } + } +} + impl<'tcx> Scope<'tcx> { /// Invalidate all the cached blocks in the scope. /// @@ -337,9 +346,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { mut block: BasicBlock) -> BlockAnd<()> { debug!("pop_scope({:?}, {:?})", extent, block); - // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup - // to make sure all the `cached_block`s are filled in. - self.diverge_cleanup(); + // If we are emitting a `drop` statement, we need to have the cached + // diverge cleanup pads ready in case that drop panics. 
+ let may_panic = + self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic()); + if may_panic { + self.diverge_cleanup(); + } let scope = self.scopes.pop().unwrap(); assert_eq!(scope.extent, extent.0); unpack!(block = build_scope_drops(&mut self.cfg, @@ -370,6 +383,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let len = self.scopes.len(); assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes"); let tmp = self.get_unit_temp(); + + // If we are emitting a `drop` statement, we need to have the cached + // diverge cleanup pads ready in case that drop panics. + let may_panic = + self.scopes[(len - scope_count)..].iter().any(|s| s.drops.iter().any(|s| s.kind.may_panic())); + if may_panic { + self.diverge_cleanup(); + } + { let mut rest = &mut self.scopes[(len - scope_count)..]; while let Some((scope, rest_)) = {rest}.split_last_mut() { @@ -736,45 +758,48 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, mut block: BasicBlock, arg_count: usize) -> BlockAnd<()> { + debug!("build_scope_drops({:?} -> {:?})", block, scope); let mut iter = scope.drops.iter().rev().peekable(); while let Some(drop_data) = iter.next() { let source_info = scope.source_info(drop_data.span); - if let DropKind::Value { .. } = drop_data.kind { - // Try to find the next block with its cached block - // for us to diverge into in case the drop panics. - let on_diverge = iter.peek().iter().filter_map(|dd| { - match dd.kind { - DropKind::Value { cached_block } => cached_block, - DropKind::Storage => None - } - }).next(); - // If there’s no `cached_block`s within current scope, - // we must look for one in the enclosing scope. 
- let on_diverge = on_diverge.or_else(||{ - earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next() - }); - let next = cfg.start_new_block(); - cfg.terminate(block, source_info, TerminatorKind::Drop { - location: drop_data.location.clone(), - target: next, - unwind: on_diverge - }); - block = next; - } match drop_data.kind { - DropKind::Value { .. } | - DropKind::Storage => { - // Only temps and vars need their storage dead. - match drop_data.location { - Lvalue::Local(index) if index.index() > arg_count => {} - _ => continue - } + DropKind::Value { .. } => { + // Try to find the next block with its cached block + // for us to diverge into in case the drop panics. + let on_diverge = iter.peek().iter().filter_map(|dd| { + match dd.kind { + DropKind::Value { cached_block: None } => + span_bug!(drop_data.span, "cached block not present?"), + DropKind::Value { cached_block } => cached_block, + DropKind::Storage => None + } + }).next(); + // If there’s no `cached_block`s within current scope, + // we must look for one in the enclosing scope. + let on_diverge = on_diverge.or_else(|| { + earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next() + }); + let next = cfg.start_new_block(); + cfg.terminate(block, source_info, TerminatorKind::Drop { + location: drop_data.location.clone(), + target: next, + unwind: on_diverge + }); + block = next; + } + DropKind::Storage => {} + } + // Drop the storage for both value and storage drops. + // Only temps and vars need their storage dead. 
+ match drop_data.location { + Lvalue::Local(index) if index.index() > arg_count => { cfg.push(block, Statement { source_info: source_info, kind: StatementKind::StorageDead(drop_data.location.clone()) }); } + _ => continue } } block.unit() diff --git a/src/test/mir-opt/basic_assignment.rs b/src/test/mir-opt/basic_assignment.rs index ef5158a403a9..6afc344ced84 100644 --- a/src/test/mir-opt/basic_assignment.rs +++ b/src/test/mir-opt/basic_assignment.rs @@ -47,39 +47,39 @@ fn main() { // StorageDead(_3); // StorageLive(_4); // _4 = std::option::Option>::None; +// StorageLive(_5); // StorageLive(_6); -// StorageLive(_7); -// _7 = _4; -// replace(_6 <- _7) -> [return: bb6, unwind: bb7]; +// _6 = _4; +// replace(_5 <- _6) -> [return: bb1, unwind: bb7]; // } // bb1: { -// resume; +// drop(_6) -> [return: bb8, unwind: bb5]; // } // bb2: { -// drop(_4) -> bb1; +// resume; // } // bb3: { -// goto -> bb2; +// drop(_4) -> bb2; // } // bb4: { -// drop(_6) -> bb3; +// goto -> bb3; // } // bb5: { -// goto -> bb4; +// drop(_5) -> bb4; // } // bb6: { -// drop(_7) -> [return: bb8, unwind: bb4]; +// goto -> bb5; // } // bb7: { -// drop(_7) -> bb5; +// drop(_6) -> bb6; // } // bb8: { -// StorageDead(_7); +// StorageDead(_6); // _0 = (); -// drop(_6) -> [return: bb9, unwind: bb2]; +// drop(_5) -> [return: bb9, unwind: bb3]; // } // bb9: { -// StorageDead(_6); +// StorageDead(_5); // drop(_4) -> bb10; // } // bb10: { diff --git a/src/test/mir-opt/end_region_4.rs b/src/test/mir-opt/end_region_4.rs index 16ade9f96fd1..bfb1b3b65289 100644 --- a/src/test/mir-opt/end_region_4.rs +++ b/src/test/mir-opt/end_region_4.rs @@ -32,41 +32,41 @@ fn foo(i: i32) { // START rustc.node4.SimplifyCfg-qualify-consts.after.mir // let mut _0: (); // let _1: D; -// let _3: i32; -// let _4: &'6_2rce i32; +// let _2: i32; +// let _3: &'6_2rce i32; // let _7: &'6_4rce i32; -// let mut _5: (); -// let mut _6: i32; -// +// let mut _4: (); +// let mut _5: i32; +// let mut _6: (); // bb0: { // StorageLive(_1); // _1 
= D::{{constructor}}(const 0i32,); +// StorageLive(_2); +// _2 = const 0i32; // StorageLive(_3); -// _3 = const 0i32; -// StorageLive(_4); -// _4 = &'6_2rce _3; -// StorageLive(_6); -// _6 = (*_4); -// _5 = const foo(_6) -> [return: bb2, unwind: bb3]; +// _3 = &'6_2rce _2; +// StorageLive(_5); +// _5 = (*_3); +// _4 = const foo(_5) -> [return: bb1, unwind: bb3]; // } // bb1: { -// resume; -// } -// bb2: { -// StorageDead(_6); +// StorageDead(_5); // StorageLive(_7); -// _7 = &'6_4rce _3; +// _7 = &'6_4rce _2; // _0 = (); // StorageDead(_7); // EndRegion('6_4rce); -// StorageDead(_4); -// EndRegion('6_2rce); // StorageDead(_3); +// EndRegion('6_2rce); +// StorageDead(_2); // drop(_1) -> bb4; // } +// bb2: { +// resume; +// } // bb3: { // EndRegion('6_2rce); -// drop(_1) -> bb1; +// drop(_1) -> bb2; // } // bb4: { // StorageDead(_1); diff --git a/src/test/mir-opt/end_region_5.rs b/src/test/mir-opt/end_region_5.rs index 513632a4cdf3..773a348a9397 100644 --- a/src/test/mir-opt/end_region_5.rs +++ b/src/test/mir-opt/end_region_5.rs @@ -31,32 +31,31 @@ fn foo(f: F) where F: FnOnce() -> i32 { // let mut _0: (); // let _1: D; // let mut _2: (); -// let mut _3: (); -// let mut _4: [closure@NodeId(18) d: &'19mce D]; -// let mut _5: &'19mce D; -// +// let mut _3: [closure@NodeId(18) d:&'19mce D]; +// let mut _4: &'19mce D; +// let mut _5: (); // bb0: { // StorageLive(_1); // _1 = D::{{constructor}}(const 0i32,); +// StorageLive(_3); // StorageLive(_4); -// StorageLive(_5); -// _5 = &'19mce _1; -// _4 = [closure@NodeId(18)] { d: _5 }; -// StorageDead(_5); -// _3 = const foo(_4) -> [return: bb2, unwind: bb3]; +// _4 = &'19mce _1; +// _3 = [closure@NodeId(18)] { d: _4 }; +// StorageDead(_4); +// _2 = const foo(_3) -> [return: bb1, unwind: bb3]; // } // bb1: { -// resume; -// } -// bb2: { -// StorageDead(_4); +// StorageDead(_3); // EndRegion('19mce); // _0 = (); // drop(_1) -> bb4; // } +// bb2: { +// resume; +// } // bb3: { // EndRegion('19mce); -// drop(_1) -> bb1; +// 
drop(_1) -> bb2; // } // bb4: { // StorageDead(_1); diff --git a/src/test/mir-opt/end_region_6.rs b/src/test/mir-opt/end_region_6.rs index e82556f3ce4b..112c93843e04 100644 --- a/src/test/mir-opt/end_region_6.rs +++ b/src/test/mir-opt/end_region_6.rs @@ -27,35 +27,35 @@ fn foo(f: F) where F: FnOnce() -> i32 { // END RUST SOURCE // START rustc.node4.SimplifyCfg-qualify-consts.after.mir +// fn main() -> () { // let mut _0: (); // let _1: D; // let mut _2: (); -// let mut _3: (); -// let mut _4: [closure@NodeId(22) d:&'23mce D]; -// let mut _5: &'23mce D; -// +// let mut _3: [closure@NodeId(22) d:&'23mce D]; +// let mut _4: &'23mce D; +// let mut _5: (); // bb0: { // StorageLive(_1); // _1 = D::{{constructor}}(const 0i32,); +// StorageLive(_3); // StorageLive(_4); -// StorageLive(_5); -// _5 = &'23mce _1; -// _4 = [closure@NodeId(22)] { d: _5 }; -// StorageDead(_5); -// _3 = const foo(_4) -> [return: bb2, unwind: bb3]; +// _4 = &'23mce _1; +// _3 = [closure@NodeId(22)] { d: _4 }; +// StorageDead(_4); +// _2 = const foo(_3) -> [return: bb1, unwind: bb3]; // } // bb1: { -// resume; -// } -// bb2: { -// StorageDead(_4); +// StorageDead(_3); // EndRegion('23mce); // _0 = (); // drop(_1) -> bb4; // } +// bb2: { +// resume; +// } // bb3: { // EndRegion('23mce); -// drop(_1) -> bb1; +// drop(_1) -> bb2; // } // bb4: { // StorageDead(_1); diff --git a/src/test/mir-opt/end_region_7.rs b/src/test/mir-opt/end_region_7.rs index 3fbd3f368659..913986ae816a 100644 --- a/src/test/mir-opt/end_region_7.rs +++ b/src/test/mir-opt/end_region_7.rs @@ -31,18 +31,18 @@ fn foo(f: F) where F: FnOnce() -> i32 { // let mut _0: (); // let _1: D; // let mut _2: (); -// let mut _3: (); -// let mut _4: [closure@NodeId(22) d:D]; -// let mut _5: D; +// let mut _3: [closure@NodeId(22) d:D]; +// let mut _4: D; +// let mut _5: (); // // bb0: { // StorageLive(_1); // _1 = D::{{constructor}}(const 0i32,); +// StorageLive(_3); // StorageLive(_4); -// StorageLive(_5); -// _5 = _1; -// _4 = 
[closure@NodeId(22)] { d: _5 }; -// drop(_5) -> [return: bb4, unwind: bb3]; +// _4 = _1; +// _3 = [closure@NodeId(22)] { d: _4 }; +// drop(_4) -> [return: bb4, unwind: bb3]; // } // bb1: { // resume; @@ -51,17 +51,17 @@ fn foo(f: F) where F: FnOnce() -> i32 { // drop(_1) -> bb1; // } // bb3: { -// drop(_4) -> bb2; +// drop(_3) -> bb2; // } // bb4: { -// StorageDead(_5); -// _3 = const foo(_4) -> [return: bb5, unwind: bb3]; +// StorageDead(_4); +// _2 = const foo(_3) -> [return: bb5, unwind: bb3]; // } // bb5: { -// drop(_4) -> [return: bb6, unwind: bb2]; +// drop(_3) -> [return: bb6, unwind: bb2]; // } // bb6: { -// StorageDead(_4); +// StorageDead(_3); // _0 = (); // drop(_1) -> bb7; // } @@ -76,16 +76,16 @@ fn foo(f: F) where F: FnOnce() -> i32 { // fn main::{{closure}}(_1: [closure@NodeId(22) d:D]) -> i32 { // let mut _0: i32; // let _2: &'14_0rce D; -// let mut _3: (); -// let mut _4: i32; +// let mut _3: i32; +// let mut _4: (); // // bb0: { // StorageLive(_2); // _2 = &'14_0rce (_1.0: D); -// StorageLive(_4); -// _4 = ((*_2).0: i32); -// _0 = _4; -// StorageDead(_4); +// StorageLive(_3); +// _3 = ((*_2).0: i32); +// _0 = _3; +// StorageDead(_3); // StorageDead(_2); // EndRegion('14_0rce); // drop(_1) -> bb1; diff --git a/src/test/mir-opt/end_region_8.rs b/src/test/mir-opt/end_region_8.rs index 7fb3f0b91181..dc8f8ea11f51 100644 --- a/src/test/mir-opt/end_region_8.rs +++ b/src/test/mir-opt/end_region_8.rs @@ -29,44 +29,43 @@ fn foo(f: F) where F: FnOnce() -> i32 { // END RUST SOURCE // START rustc.node4.SimplifyCfg-qualify-consts.after.mir // fn main() -> () { -// let mut _0: (); -// let _1: D; -// let _3: &'6_1rce D; -// let mut _2: (); -// let mut _4: (); -// let mut _5: [closure@NodeId(22) r:&'6_1rce D]; -// let mut _6: &'6_1rce D; -// -// bb0: { -// StorageLive(_1); -// _1 = D::{{constructor}}(const 0i32,); -// StorageLive(_3); -// _3 = &'6_1rce _1; -// StorageLive(_5); -// StorageLive(_6); -// _6 = _3; -// _5 = [closure@NodeId(22)] { r: _6 }; -// 
StorageDead(_6); -// _4 = const foo(_5) -> [return: bb2, unwind: bb3]; -// } -// bb1: { -// resume; -// } -// bb2: { -// StorageDead(_5); -// _0 = (); -// StorageDead(_3); -// EndRegion('6_1rce); -// drop(_1) -> bb4; -// } -// bb3: { -// EndRegion('6_1rce); -// drop(_1) -> bb1; -// } -// bb4: { -// StorageDead(_1); -// return; -// } +// let mut _0: (); +// let _1: D; +// let _2: &'6_1rce D; +// let mut _3: (); +// let mut _4: [closure@NodeId(22) r:&'6_1rce D]; +// let mut _5: &'6_1rce D; +// let mut _6: (); +// bb0: { +// StorageLive(_1); +// _1 = D::{{constructor}}(const 0i32,); +// StorageLive(_2); +// _2 = &'6_1rce _1; +// StorageLive(_4); +// StorageLive(_5); +// _5 = _2; +// _4 = [closure@NodeId(22)] { r: _5 }; +// StorageDead(_5); +// _3 = const foo(_4) -> [return: bb1, unwind: bb3]; +// } +// bb1: { +// StorageDead(_4); +// _0 = (); +// StorageDead(_2); +// EndRegion('6_1rce); +// drop(_1) -> bb4; +// } +// bb2: { +// resume; +// } +// bb3: { +// EndRegion('6_1rce); +// drop(_1) -> bb2; +// } +// bb4: { +// StorageDead(_1); +// return; +// } // } // END rustc.node4.SimplifyCfg-qualify-consts.after.mir diff --git a/src/test/mir-opt/issue-41110.rs b/src/test/mir-opt/issue-41110.rs index fec635b3abf6..1daa18256dce 100644 --- a/src/test/mir-opt/issue-41110.rs +++ b/src/test/mir-opt/issue-41110.rs @@ -34,18 +34,23 @@ impl S { // END RUST SOURCE // START rustc.node4.ElaborateDrops.after.mir +// let mut _0: (); +// let _1: (); // let mut _2: S; -// let mut _3: (); +// let mut _3: S; // let mut _4: S; -// let mut _5: S; +// let mut _5: (); // let mut _6: bool; // // bb0: { // END rustc.node4.ElaborateDrops.after.mir // START rustc.node13.ElaborateDrops.after.mir -// let mut _2: (); -// let mut _4: (); -// let mut _5: S; +// let mut _0: (); +// let _1: S; +// let mut _2: S; +// let mut _3: (); +// let mut _4: S; +// let mut _5: (); // let mut _6: S; // let mut _7: bool; // From ca3105cfdf4221d6855f16f2f841d359248c349a Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda 
Date: Tue, 1 Aug 2017 00:09:32 +0300 Subject: [PATCH 113/213] use an iterator when visiting MIR basic blocks I saw MIR cache invalidation somewhat hot on my profiler when per-BB indexin was used. That shouldn't matter much, but there is no good reason not to use an iterator. --- src/librustc/mir/visit.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index fd3a9f8cd2d9..a24b2ad0e432 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -14,7 +14,6 @@ use ty::subst::Substs; use ty::{ClosureSubsts, Region, Ty}; use mir::*; use rustc_const_math::ConstUsize; -use rustc_data_structures::indexed_vec::Idx; use syntax_pos::Span; // # The MIR Visitor @@ -260,9 +259,15 @@ macro_rules! make_mir_visitor { fn super_mir(&mut self, mir: & $($mutability)* Mir<'tcx>) { - for index in 0..mir.basic_blocks().len() { - let block = BasicBlock::new(index); - self.visit_basic_block_data(block, &$($mutability)* mir[block]); + // for best performance, we want to use an iterator rather + // than a for-loop, to avoid calling Mir::invalidate for + // each basic block. + macro_rules! basic_blocks { + (mut) => (mir.basic_blocks_mut().iter_enumerated_mut()); + () => (mir.basic_blocks().iter_enumerated()); + }; + for (bb, data) in basic_blocks!($($mutability)*) { + self.visit_basic_block_data(bb, data); } for scope in &$($mutability)* mir.visibility_scopes { From 5b99523de9cb362a2328829959618aef0becb38e Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Tue, 1 Aug 2017 00:10:46 +0300 Subject: [PATCH 114/213] rustc_mir::transform::simplify - remove nops first Removing nops can allow more basic blocks to be merged, but merging basic blocks can't allow for more nops to be removed, so we should remove nops first. This doesn't matter *that* much, because normally we run SimplifyCfg several times, but there's no reason not to do it. 
--- src/librustc_mir/transform/simplify.rs | 4 ++-- src/test/mir-opt/basic_assignment.rs | 22 ++++++++-------------- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/src/librustc_mir/transform/simplify.rs b/src/librustc_mir/transform/simplify.rs index d5b79c0d1c38..a1d56ccd874a 100644 --- a/src/librustc_mir/transform/simplify.rs +++ b/src/librustc_mir/transform/simplify.rs @@ -105,6 +105,8 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { } pub fn simplify(mut self) { + self.strip_nops(); + loop { let mut changed = false; @@ -141,8 +143,6 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { if !changed { break } } - - self.strip_nops() } // Collapse a goto chain starting from `start` diff --git a/src/test/mir-opt/basic_assignment.rs b/src/test/mir-opt/basic_assignment.rs index 6afc344ced84..d3bf7f68785d 100644 --- a/src/test/mir-opt/basic_assignment.rs +++ b/src/test/mir-opt/basic_assignment.rs @@ -50,10 +50,10 @@ fn main() { // StorageLive(_5); // StorageLive(_6); // _6 = _4; -// replace(_5 <- _6) -> [return: bb1, unwind: bb7]; +// replace(_5 <- _6) -> [return: bb1, unwind: bb5]; // } // bb1: { -// drop(_6) -> [return: bb8, unwind: bb5]; +// drop(_6) -> [return: bb6, unwind: bb4]; // } // bb2: { // resume; @@ -62,27 +62,21 @@ fn main() { // drop(_4) -> bb2; // } // bb4: { -// goto -> bb3; +// drop(_5) -> bb3; // } // bb5: { -// drop(_5) -> bb4; +// drop(_6) -> bb4; // } // bb6: { -// goto -> bb5; -// } -// bb7: { -// drop(_6) -> bb6; -// } -// bb8: { // StorageDead(_6); // _0 = (); -// drop(_5) -> [return: bb9, unwind: bb3]; +// drop(_5) -> [return: bb7, unwind: bb3]; // } -// bb9: { +// bb7: { // StorageDead(_5); -// drop(_4) -> bb10; +// drop(_4) -> bb8; // } -// bb10: { +// bb8: { // StorageDead(_4); // StorageDead(_2); // StorageDead(_1); From 1636a2cf41d79630ecc94f9a8b6d9e8bb501b048 Mon Sep 17 00:00:00 2001 From: Josh Stone Date: Mon, 31 Jul 2017 15:34:05 -0700 Subject: [PATCH 115/213] Link LLVM tools dynamically Set `LLVM_LINK_LLVM_DYLIB=ON` -- "If 
enabled, tools will be linked with the libLLVM shared library." Rust doesn't ship any of the LLVM tools, and only needs a few at all for some test cases, so statically linking the tools is just a waste of space. I've also had memory issues on slower machines with LLVM debuginfo enabled, when several tools start linking in parallel consuming several GBs each. With the default configuration, `build/x86_64-unknown-linux-gnu/llvm` was 1.5GB before, now down to 731MB. The difference is more drastic with `--enable-llvm-release-debuginfo`, from 28GB to "only" 13GB. This does not change the linking behavior of `rustc_llvm`. --- src/bootstrap/native.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index cfd20b02aaf6..595f90be1dd6 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -125,6 +125,7 @@ impl Step for Llvm { .define("WITH_POLLY", "OFF") .define("LLVM_ENABLE_TERMINFO", "OFF") .define("LLVM_ENABLE_LIBEDIT", "OFF") + .define("LLVM_LINK_LLVM_DYLIB", "ON") .define("LLVM_PARALLEL_COMPILE_JOBS", build.jobs().to_string()) .define("LLVM_TARGET_ARCH", target.split('-').next().unwrap()) .define("LLVM_DEFAULT_TARGET_TRIPLE", target); From 5e426e10683d851a530e17d33bf6454d958b7d46 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 31 Jul 2017 15:46:36 -0700 Subject: [PATCH 116/213] optionally only emit basic validation for functions containing unsafe block / unsafe function --- src/librustc/hir/mod.rs | 6 +- src/librustc/session/config.rs | 5 +- src/librustc_mir/transform/add_validation.rs | 174 ++++++++++++++----- src/librustc_mir/transform/erase_regions.rs | 4 +- src/test/mir-opt/validate_1.rs | 2 +- src/test/mir-opt/validate_2.rs | 2 +- src/test/mir-opt/validate_3.rs | 20 ++- 7 files changed, 155 insertions(+), 58 deletions(-) diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index cc0d49c1a363..85d9745246f6 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -49,7 +49,7 
@@ use rustc_data_structures::indexed_vec; use std::collections::BTreeMap; use std::fmt; -/// HIR doesn't commit to a concrete storage type and have its own alias for a vector. +/// HIR doesn't commit to a concrete storage type and has its own alias for a vector. /// It can be `Vec`, `P<[T]>` or potentially `Box<[T]>`, or some other container with similar /// behavior. Unlike AST, HIR is mostly a static structure, so we can use an owned slice instead /// of `Vec` to avoid keeping extra capacity. @@ -76,14 +76,14 @@ pub mod pat_util; pub mod print; pub mod svh; -/// A HirId uniquely identifies a node in the HIR of then current crate. It is +/// A HirId uniquely identifies a node in the HIR of the current crate. It is /// composed of the `owner`, which is the DefIndex of the directly enclosing /// hir::Item, hir::TraitItem, or hir::ImplItem (i.e. the closest "item-like"), /// and the `local_id` which is unique within the given owner. /// /// This two-level structure makes for more stable values: One can move an item /// around within the source code, or add or remove stuff before it, without -/// the local_id part of the HirId changing, which is a very useful property +/// the local_id part of the HirId changing, which is a very useful property in /// incremental compilation where we have to persist things through changes to /// the code base. #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug, diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index c5ddcb597cbb..c8b9412c5663 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -1025,8 +1025,9 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, "the directory the MIR is dumped into"), dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED], "if set, exclude the pass number when dumping MIR (used in tests)"), - mir_emit_validate: bool = (false, parse_bool, [TRACKED], - "emit Validate MIR statements, interpreted e.g. by miri"), + mir_emit_validate: usize = (0, parse_uint, [TRACKED], + "emit Validate MIR statements, interpreted e.g. by miri (0: do not emit; 1: if function \ + contains unsafe block, only validate arguments; 2: always emit full validation)"), perf_stats: bool = (false, parse_bool, [UNTRACKED], "print some performance-related statistics"), hir_stats: bool = (false, parse_bool, [UNTRACKED], diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index ee472c616f65..1329378fbef0 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -14,6 +14,8 @@ //! of MIR building, and only after this pass we think of the program has having the //! normal MIR semantics. +use syntax_pos::Span; +use syntax::ast::NodeId; use rustc::ty::{self, TyCtxt, RegionKind}; use rustc::hir; use rustc::mir::*; @@ -80,15 +82,78 @@ fn lval_context<'a, 'tcx, D>( } } +/// Check if this function contains an unsafe block or is an unsafe function. 
+fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> bool { + use rustc::hir::intravisit::{self, Visitor}; + + let fn_node_id = match src { + MirSource::Fn(node_id) => node_id, + _ => return false, // only functions can have unsafe + }; + let fn_item = tcx.hir.expect_item(fn_node_id); + + struct FindUnsafe<'b, 'tcx> where 'tcx : 'b { + map: &'b hir::map::Map<'tcx>, + found_unsafe: bool, + } + let mut finder = FindUnsafe { map: &tcx.hir, found_unsafe: false }; + finder.visit_item(fn_item); + + impl<'b, 'tcx> Visitor<'tcx> for FindUnsafe<'b, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'tcx> { + intravisit::NestedVisitorMap::OnlyBodies(self.map) + } + + fn visit_fn(&mut self, fk: intravisit::FnKind<'tcx>, fd: &'tcx hir::FnDecl, + b: hir::BodyId, s: Span, id: NodeId) + { + assert!(!self.found_unsafe, "We should never see more than one fn"); + let is_unsafe = match fk { + intravisit::FnKind::ItemFn(_, _, unsafety, ..) => unsafety == hir::Unsafety::Unsafe, + intravisit::FnKind::Method(_, sig, ..) => sig.unsafety == hir::Unsafety::Unsafe, + intravisit::FnKind::Closure(_) => false, + }; + if is_unsafe { + // This is unsafe, and we are done. + self.found_unsafe = true; + } else { + // Go on searching. + intravisit::walk_fn(self, fk, fd, b, s, id) + } + } + + fn visit_block(&mut self, b: &'tcx hir::Block) { + use rustc::hir::BlockCheckMode::*; + + if self.found_unsafe { return; } // short-circuit + + match b.rules { + UnsafeBlock(_) | PushUnsafeBlock(_) => { + // We found an unsafe block. + self.found_unsafe = true; + } + DefaultBlock | PopUnsafeBlock(_) => { + // No unsafe block here, go on searching. 
+ intravisit::walk_block(self, b); + } + }; + } + } + + finder.found_unsafe +} + impl MirPass for AddValidation { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _: MirSource, - mir: &mut Mir<'tcx>) { - if !tcx.sess.opts.debugging_opts.mir_emit_validate { + src: MirSource, + mir: &mut Mir<'tcx>) + { + let emit_validate = tcx.sess.opts.debugging_opts.mir_emit_validate; + if emit_validate == 0 { return; } - + let restricted_validation = emit_validate == 1 && fn_contains_unsafe(tcx, src); let local_decls = mir.local_decls.clone(); // FIXME: Find a way to get rid of this clone. // Convert an lvalue to a validation operand. @@ -98,22 +163,40 @@ impl MirPass for AddValidation { ValidationOperand { lval, ty, re, mutbl } }; + // Emit an Acquire at the beginning of the given block. If we are in restricted emission mode + // (mir_emit_validate=1), also emit a Release immediately after the Acquire. + let emit_acquire = |block: &mut BasicBlockData<'tcx>, source_info, operands: Vec<_>| { + if operands.len() == 0 { + return; // Nothing to do + } + // Emit the release first, to avoid cloning if we do not emit it + if restricted_validation { + let release_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Release, operands.clone()), + }; + block.statements.insert(0, release_stmt); + } + // Now, the acquire + let acquire_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Acquire, operands), + }; + block.statements.insert(0, acquire_stmt); + }; + // PART 1 // Add an AcquireValid at the beginning of the start block. - if mir.arg_count > 0 { - let acquire_stmt = Statement { - source_info: SourceInfo { - scope: ARGUMENT_VISIBILITY_SCOPE, - span: mir.span, // FIXME: Consider using just the span covering the function - // argument declaration. 
- }, - kind: StatementKind::Validate(ValidationOp::Acquire, - // Skip return value, go over all the arguments - mir.local_decls.iter_enumerated().skip(1).take(mir.arg_count) - .map(|(local, _)| lval_to_operand(Lvalue::Local(local))).collect() - ) + { + let source_info = SourceInfo { + scope: ARGUMENT_VISIBILITY_SCOPE, + span: mir.span, // FIXME: Consider using just the span covering the function + // argument declaration. }; - mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, acquire_stmt); + // Gather all arguments, skip return value. + let operands = mir.local_decls.iter_enumerated().skip(1).take(mir.arg_count) + .map(|(local, _)| lval_to_operand(Lvalue::Local(local))).collect(); + emit_acquire(&mut mir.basic_blocks_mut()[START_BLOCK], source_info, operands); } // PART 2 @@ -125,18 +208,20 @@ impl MirPass for AddValidation { Some(Terminator { kind: TerminatorKind::Call { ref args, ref destination, .. }, source_info }) => { // Before the call: Release all arguments - let release_stmt = Statement { - source_info, - kind: StatementKind::Validate(ValidationOp::Release, - args.iter().filter_map(|op| { - match op { - &Operand::Consume(ref lval) => - Some(lval_to_operand(lval.clone())), - &Operand::Constant(..) => { None }, - } - }).collect()) - }; - block_data.statements.push(release_stmt); + if !restricted_validation { + let release_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Release, + args.iter().filter_map(|op| { + match op { + &Operand::Consume(ref lval) => + Some(lval_to_operand(lval.clone())), + &Operand::Constant(..) => { None }, + } + }).collect()) + }; + block_data.statements.push(release_stmt); + } // Remember the return destination for later if let &Some(ref destination) = destination { returns.push((source_info, destination.0.clone(), destination.1)); @@ -147,12 +232,14 @@ impl MirPass for AddValidation { Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref lval, .. 
}, source_info }) => { // Before the call: Release all arguments - let release_stmt = Statement { - source_info, - kind: StatementKind::Validate(ValidationOp::Release, - vec![lval_to_operand(lval.clone())]), - }; - block_data.statements.push(release_stmt); + if !restricted_validation { + let release_stmt = Statement { + source_info, + kind: StatementKind::Validate(ValidationOp::Release, + vec![lval_to_operand(lval.clone())]), + }; + block_data.statements.push(release_stmt); + } // drop doesn't return anything, so we need no acquire. } _ => { @@ -162,18 +249,21 @@ impl MirPass for AddValidation { } // Now we go over the returns we collected to acquire the return values. for (source_info, dest_lval, dest_block) in returns { - let acquire_stmt = Statement { + emit_acquire( + &mut mir.basic_blocks_mut()[dest_block], source_info, - kind: StatementKind::Validate(ValidationOp::Acquire, - vec![lval_to_operand(dest_lval)]), - }; - mir.basic_blocks_mut()[dest_block].statements.insert(0, acquire_stmt); + vec![lval_to_operand(dest_lval)] + ); + } + + if restricted_validation { + // No part 3 for us. + return; } // PART 3 // Add ReleaseValid/AcquireValid around Ref and Cast. Again an iterator does not seem very - // suited - // as we need to add new statements before and after each Ref. + // suited as we need to add new statements before and after each Ref. for block_data in mir.basic_blocks_mut() { // We want to insert statements around Ref commands as we iterate. To this end, we // iterate backwards using indices. 
diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index f01d71fde264..baf0522896c9 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -77,7 +77,9 @@ impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { block: BasicBlock, statement: &mut Statement<'tcx>, location: Location) { - if !self.tcx.sess.opts.debugging_opts.mir_emit_validate { + // Do NOT delete EndRegion if validation statements are emitted. + // Validation needs EndRegion. + if self.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 { if let StatementKind::EndRegion(_) = statement.kind { statement.kind = StatementKind::Nop; } diff --git a/src/test/mir-opt/validate_1.rs b/src/test/mir-opt/validate_1.rs index 868d23b03c21..558426fcde14 100644 --- a/src/test/mir-opt/validate_1.rs +++ b/src/test/mir-opt/validate_1.rs @@ -9,7 +9,7 @@ // except according to those terms. // ignore-tidy-linelength -// compile-flags: -Z verbose -Z mir-emit-validate +// compile-flags: -Z verbose -Z mir-emit-validate=1 fn foo(_x: &mut i32) {} diff --git a/src/test/mir-opt/validate_2.rs b/src/test/mir-opt/validate_2.rs index a219c5fc78eb..21723739ca19 100644 --- a/src/test/mir-opt/validate_2.rs +++ b/src/test/mir-opt/validate_2.rs @@ -9,7 +9,7 @@ // except according to those terms. // ignore-tidy-linelength -// compile-flags: -Z verbose -Z mir-emit-validate +// compile-flags: -Z verbose -Z mir-emit-validate=1 fn main() { let _x : Box<[i32]> = Box::new([1, 2, 3]); diff --git a/src/test/mir-opt/validate_3.rs b/src/test/mir-opt/validate_3.rs index 78957115f505..88ae114c579a 100644 --- a/src/test/mir-opt/validate_3.rs +++ b/src/test/mir-opt/validate_3.rs @@ -9,7 +9,7 @@ // except according to those terms. 
// ignore-tidy-linelength -// compile-flags: -Z verbose -Z mir-emit-validate +// compile-flags: -Z verbose -Z mir-emit-validate=1 struct Test { x: i32 @@ -18,6 +18,10 @@ struct Test { fn foo(_x: &i32) {} fn main() { + // These internal unsafe functions should have no effect on the code generation. + unsafe fn _unused1() {} + fn _unused2(x: *const i32) -> i32 { unsafe { *x }} + let t = Test { x: 0 }; let t = &t; foo(&t.x); @@ -28,18 +32,18 @@ fn main() { // fn main() -> () { // let mut _5: &ReErased i32; // bb0: { -// Validate(Suspend(ReScope(Misc(NodeId(31)))), [((*_2).0: i32)@i32/ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 1 })) (imm)]); +// Validate(Suspend(ReScope(Misc(NodeId(46)))), [((*_2).0: i32)@i32/ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 3 })) (imm)]); // _5 = &ReErased ((*_2).0: i32); -// Validate(Acquire, [(*_5)@i32/ReScope(Misc(NodeId(31))) (imm)]); -// Validate(Suspend(ReScope(Misc(NodeId(31)))), [(*_5)@i32/ReScope(Misc(NodeId(31))) (imm)]); +// Validate(Acquire, [(*_5)@i32/ReScope(Misc(NodeId(46))) (imm)]); +// Validate(Suspend(ReScope(Misc(NodeId(46)))), [(*_5)@i32/ReScope(Misc(NodeId(46))) (imm)]); // _4 = &ReErased (*_5); -// Validate(Acquire, [(*_4)@i32/ReScope(Misc(NodeId(31))) (imm)]); -// Validate(Release, [_4@&ReScope(Misc(NodeId(31))) i32]); +// Validate(Acquire, [(*_4)@i32/ReScope(Misc(NodeId(46))) (imm)]); +// Validate(Release, [_4@&ReScope(Misc(NodeId(46))) i32]); // _3 = const foo(_4) -> bb1; // } // bb1: { -// EndRegion(ReScope(Misc(NodeId(31)))); -// EndRegion(ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 1 }))); +// EndRegion(ReScope(Misc(NodeId(46)))); +// EndRegion(ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 3 }))); // return; // } // } From 09cbe588c3a73b06ce800732ce122d8357c1d0cc Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 31 Jul 2017 15:59:29 -0700 Subject: [PATCH 117/213] more readable 
printing of validation operands --- src/librustc/mir/mod.rs | 2 +- src/test/mir-opt/validate_1.rs | 14 +++++++------- src/test/mir-opt/validate_2.rs | 4 ++-- src/test/mir-opt/validate_3.rs | 10 +++++----- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index f7ef542544cc..1e8dda0addf4 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -877,7 +877,7 @@ pub struct ValidationOperand<'tcx, T> { impl<'tcx, T: Debug> Debug for ValidationOperand<'tcx, T> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - write!(fmt, "{:?}@{:?}", self.lval, self.ty)?; + write!(fmt, "{:?}: {:?}", self.lval, self.ty)?; if let Some(ce) = self.re { // (reuse lifetime rendering policy from ppaux.) write!(fmt, "/{}", ty::ReScope(ce))?; diff --git a/src/test/mir-opt/validate_1.rs b/src/test/mir-opt/validate_1.rs index 558426fcde14..4a143c4cee9e 100644 --- a/src/test/mir-opt/validate_1.rs +++ b/src/test/mir-opt/validate_1.rs @@ -22,7 +22,7 @@ fn main() { // START rustc.node4.EraseRegions.after.mir // fn foo(_1: &ReErased mut i32) -> () { // bb0: { -// Validate(Acquire, [_1@&ReFree(DefId { krate: CrateNum(0), node: DefIndex(3) => validate_1/8cd878b::foo[0] }, BrAnon(0)) mut i32]); +// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(3) => validate_1/8cd878b::foo[0] }, BrAnon(0)) mut i32]); // return; // } // } @@ -30,18 +30,18 @@ fn main() { // START rustc.node11.EraseRegions.after.mir // fn main() -> () { // bb0: { -// Validate(Suspend(ReScope(Misc(NodeId(20)))), [_1@i32]); +// Validate(Suspend(ReScope(Misc(NodeId(20)))), [_1: i32]); // _4 = &ReErased mut _1; -// Validate(Acquire, [(*_4)@i32/ReScope(Misc(NodeId(20)))]); -// Validate(Suspend(ReScope(Misc(NodeId(20)))), [(*_4)@i32/ReScope(Misc(NodeId(20)))]); +// Validate(Acquire, [(*_4): i32/ReScope(Misc(NodeId(20)))]); +// Validate(Suspend(ReScope(Misc(NodeId(20)))), [(*_4): i32/ReScope(Misc(NodeId(20)))]); // _3 = &ReErased mut 
(*_4); -// Validate(Acquire, [(*_3)@i32/ReScope(Misc(NodeId(20)))]); -// Validate(Release, [_3@&ReScope(Misc(NodeId(20))) mut i32]); +// Validate(Acquire, [(*_3): i32/ReScope(Misc(NodeId(20)))]); +// Validate(Release, [_3: &ReScope(Misc(NodeId(20))) mut i32]); // _2 = const foo(_3) -> bb1; // } // // bb1: { -// Validate(Acquire, [_2@()]); +// Validate(Acquire, [_2: ()]); // EndRegion(ReScope(Misc(NodeId(20)))); // return; // } diff --git a/src/test/mir-opt/validate_2.rs b/src/test/mir-opt/validate_2.rs index 21723739ca19..37ebd720d52d 100644 --- a/src/test/mir-opt/validate_2.rs +++ b/src/test/mir-opt/validate_2.rs @@ -19,9 +19,9 @@ fn main() { // START rustc.node4.EraseRegions.after.mir // fn main() -> () { // bb1: { -// Validate(Release, [_2@std::boxed::Box<[i32; 3]>]); +// Validate(Release, [_2: std::boxed::Box<[i32; 3]>]); // _1 = _2 as std::boxed::Box<[i32]> (Unsize); -// Validate(Acquire, [_1@std::boxed::Box<[i32]>]); +// Validate(Acquire, [_1: std::boxed::Box<[i32]>]); // } // } // END rustc.node4.EraseRegions.after.mir diff --git a/src/test/mir-opt/validate_3.rs b/src/test/mir-opt/validate_3.rs index 88ae114c579a..100fae5c6781 100644 --- a/src/test/mir-opt/validate_3.rs +++ b/src/test/mir-opt/validate_3.rs @@ -32,13 +32,13 @@ fn main() { // fn main() -> () { // let mut _5: &ReErased i32; // bb0: { -// Validate(Suspend(ReScope(Misc(NodeId(46)))), [((*_2).0: i32)@i32/ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 3 })) (imm)]); +// Validate(Suspend(ReScope(Misc(NodeId(46)))), [((*_2).0: i32): i32/ReScope(Remainder(BlockRemainder { block: NodeId(18), first_statement_index: 3 })) (imm)]); // _5 = &ReErased ((*_2).0: i32); -// Validate(Acquire, [(*_5)@i32/ReScope(Misc(NodeId(46))) (imm)]); -// Validate(Suspend(ReScope(Misc(NodeId(46)))), [(*_5)@i32/ReScope(Misc(NodeId(46))) (imm)]); +// Validate(Acquire, [(*_5): i32/ReScope(Misc(NodeId(46))) (imm)]); +// Validate(Suspend(ReScope(Misc(NodeId(46)))), [(*_5): 
i32/ReScope(Misc(NodeId(46))) (imm)]); // _4 = &ReErased (*_5); -// Validate(Acquire, [(*_4)@i32/ReScope(Misc(NodeId(46))) (imm)]); -// Validate(Release, [_4@&ReScope(Misc(NodeId(46))) i32]); +// Validate(Acquire, [(*_4): i32/ReScope(Misc(NodeId(46))) (imm)]); +// Validate(Release, [_4: &ReScope(Misc(NodeId(46))) i32]); // _3 = const foo(_4) -> bb1; // } // bb1: { From 26ca0d1b3ac8046662e1a6d976c9fdfba2c118dc Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 31 Jul 2017 16:15:37 -0700 Subject: [PATCH 118/213] tidy --- src/librustc_mir/transform/add_validation.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 1329378fbef0..86a86f4934ca 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -163,8 +163,8 @@ impl MirPass for AddValidation { ValidationOperand { lval, ty, re, mutbl } }; - // Emit an Acquire at the beginning of the given block. If we are in restricted emission mode - // (mir_emit_validate=1), also emit a Release immediately after the Acquire. + // Emit an Acquire at the beginning of the given block. If we are in restricted emission + // mode (mir_emit_validate=1), also emit a Release immediately after the Acquire. let emit_acquire = |block: &mut BasicBlockData<'tcx>, source_info, operands: Vec<_>| { if operands.len() == 0 { return; // Nothing to do From 881a7246606cb6ced8ba63a8d58b7e54fed90b7c Mon Sep 17 00:00:00 2001 From: Josh Stone Date: Mon, 31 Jul 2017 18:10:01 -0700 Subject: [PATCH 119/213] Gate LLVMRustHasFeature on LLVM_RUSTLLVM Commit c4710203c098b in #43492 make `LLVMRustHasFeature` "more robust" by using `getFeatureTable()`. However, this function is specific to Rust's own LLVM fork, not upstream LLVM-4.0, so we need to use `#if LLVM_RUSTLLVM` to guard this call. 
--- src/rustllvm/PassWrapper.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rustllvm/PassWrapper.cpp b/src/rustllvm/PassWrapper.cpp index 57e90be27748..bca0881c08c5 100644 --- a/src/rustllvm/PassWrapper.cpp +++ b/src/rustllvm/PassWrapper.cpp @@ -178,10 +178,10 @@ GEN_SUBTARGETS extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM, const char *Feature) { +#if LLVM_RUSTLLVM TargetMachine *Target = unwrap(TM); const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo(); const FeatureBitset &Bits = MCInfo->getFeatureBits(); -#if LLVM_VERSION_GE(4, 0) const ArrayRef FeatTable = MCInfo->getFeatureTable(); for (auto &FeatureEntry : FeatTable) From e73d3145f57817ff91468107fc8cad3c6d6616e1 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 31 Jul 2017 18:33:45 -0700 Subject: [PATCH 120/213] fix AddValidation on methods --- src/librustc_mir/transform/add_validation.rs | 10 ++++- src/test/mir-opt/validate_1.rs | 39 +++++++++++--------- 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 86a86f4934ca..a3ec6af76034 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -85,19 +85,25 @@ fn lval_context<'a, 'tcx, D>( /// Check if this function contains an unsafe block or is an unsafe function. fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> bool { use rustc::hir::intravisit::{self, Visitor}; + use rustc::hir::map::Node; let fn_node_id = match src { MirSource::Fn(node_id) => node_id, _ => return false, // only functions can have unsafe }; - let fn_item = tcx.hir.expect_item(fn_node_id); struct FindUnsafe<'b, 'tcx> where 'tcx : 'b { map: &'b hir::map::Map<'tcx>, found_unsafe: bool, } let mut finder = FindUnsafe { map: &tcx.hir, found_unsafe: false }; - finder.visit_item(fn_item); + // Run the visitor on the NodeId we got. 
Seems like there is no uniform way to do that. + match tcx.hir.find(fn_node_id) { + Some(Node::NodeItem(item)) => finder.visit_item(item), + Some(Node::NodeImplItem(item)) => finder.visit_impl_item(item), + Some(_) | None => + bug!("Expected method or function, found {}", tcx.hir.node_to_string(fn_node_id)), + }; impl<'b, 'tcx> Visitor<'tcx> for FindUnsafe<'b, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'tcx> { diff --git a/src/test/mir-opt/validate_1.rs b/src/test/mir-opt/validate_1.rs index 4a143c4cee9e..c8ea2bc25447 100644 --- a/src/test/mir-opt/validate_1.rs +++ b/src/test/mir-opt/validate_1.rs @@ -11,39 +11,42 @@ // ignore-tidy-linelength // compile-flags: -Z verbose -Z mir-emit-validate=1 -fn foo(_x: &mut i32) {} +struct Test; + +impl Test { + // Make sure we run the pass on a method, not just on bare functions. + fn foo(&self, _x: &mut i32) {} +} fn main() { let mut x = 0; - foo(&mut x); + Test.foo(&mut x); } // END RUST SOURCE -// START rustc.node4.EraseRegions.after.mir -// fn foo(_1: &ReErased mut i32) -> () { +// START rustc.node10.EraseRegions.after.mir // bb0: { -// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(3) => validate_1/8cd878b::foo[0] }, BrAnon(0)) mut i32]); +// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(5) => validate_1/8cd878b::{{impl}}[0]::foo[0] }, BrAnon(0)) Test, _2: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(5) => validate_1/8cd878b::{{impl}}[0]::foo[0] }, BrAnon(1)) mut i32]); // return; // } -// } -// END rustc.node4.EraseRegions.after.mir -// START rustc.node11.EraseRegions.after.mir +// END rustc.node10.EraseRegions.after.mir +// START rustc.node21.EraseRegions.after.mir // fn main() -> () { // bb0: { -// Validate(Suspend(ReScope(Misc(NodeId(20)))), [_1: i32]); -// _4 = &ReErased mut _1; -// Validate(Acquire, [(*_4): i32/ReScope(Misc(NodeId(20)))]); -// Validate(Suspend(ReScope(Misc(NodeId(20)))), [(*_4): 
i32/ReScope(Misc(NodeId(20)))]); -// _3 = &ReErased mut (*_4); -// Validate(Acquire, [(*_3): i32/ReScope(Misc(NodeId(20)))]); -// Validate(Release, [_3: &ReScope(Misc(NodeId(20))) mut i32]); -// _2 = const foo(_3) -> bb1; +// Validate(Suspend(ReScope(Misc(NodeId(30)))), [_1: i32]); +// _6 = &ReErased mut _1; +// Validate(Acquire, [(*_6): i32/ReScope(Misc(NodeId(30)))]); +// Validate(Suspend(ReScope(Misc(NodeId(30)))), [(*_6): i32/ReScope(Misc(NodeId(30)))]); +// _5 = &ReErased mut (*_6); +// Validate(Acquire, [(*_5): i32/ReScope(Misc(NodeId(30)))]); +// Validate(Release, [_3: &ReScope(Misc(NodeId(30))) Test, _5: &ReScope(Misc(NodeId(30))) mut i32]); +// _2 = const Test::foo(_3, _5) -> bb1; // } // // bb1: { // Validate(Acquire, [_2: ()]); -// EndRegion(ReScope(Misc(NodeId(20)))); +// EndRegion(ReScope(Misc(NodeId(30)))); // return; // } // } -// END rustc.node11.EraseRegions.after.mir +// END rustc.node21.EraseRegions.after.mir From dd371a2069e84bf58702cb4c760681ee9c8aa874 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 31 Jul 2017 18:39:25 -0700 Subject: [PATCH 121/213] rustc: Inline bitwise modification operators These need to be inlined across crates to avoid showing up as one-instruction functions in profiles! In the benchmark from #43578 this decreased the translation item collection step from 30s to 23s, and looks like it also allowed vectorization elsewhere of the operations! 
--- src/librustc_data_structures/bitslice.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/librustc_data_structures/bitslice.rs b/src/librustc_data_structures/bitslice.rs index ba53578e5791..f74af6ee1632 100644 --- a/src/librustc_data_structures/bitslice.rs +++ b/src/librustc_data_structures/bitslice.rs @@ -134,9 +134,11 @@ pub trait BitwiseOperator { pub struct Union; impl BitwiseOperator for Union { + #[inline] fn join(&self, a: usize, b: usize) -> usize { a | b } } pub struct Subtract; impl BitwiseOperator for Subtract { + #[inline] fn join(&self, a: usize, b: usize) -> usize { a & !b } } From 2dbfa3995e44af6ce4fbeaa2f9de4730e5d2fbd5 Mon Sep 17 00:00:00 2001 From: "Zack M. Davis" Date: Mon, 31 Jul 2017 00:31:32 -0700 Subject: [PATCH 122/213] limit and delimit available fields in note Also, don't show the note if no fields are available (usually due to privacy). --- src/librustc_typeck/check/mod.rs | 31 +++++++++++++------ src/test/compile-fail/E0559.rs | 2 +- src/test/compile-fail/E0560.rs | 2 +- src/test/compile-fail/issue-19922.rs | 2 +- src/test/compile-fail/numeric-fields.rs | 2 +- .../compile-fail/struct-fields-too-many.rs | 2 +- .../compile-fail/suggest-private-fields.rs | 2 +- src/test/compile-fail/union/union-fields.rs | 2 +- .../issue-36798_unknown_field.stderr | 2 +- .../issue-42599_available_fields_note.rs | 8 +++-- .../issue-42599_available_fields_note.stderr | 20 ++++++------ 11 files changed, 46 insertions(+), 29 deletions(-) diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 4b40a46f74ee..178331afabe1 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -2957,10 +2957,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } else { err.span_label(field.span, "unknown field"); let struct_variant_def = def.struct_variant(); - let available_field_names = self.available_field_names( - struct_variant_def); - err.note(&format!("available fields are: {}", - 
available_field_names.join(", "))); + let field_names = self.available_field_names(struct_variant_def); + if !field_names.is_empty() { + err.note(&format!("available fields are: {}", + self.name_series_display(field_names))); + } }; } ty::TyRawPtr(..) => { @@ -3000,17 +3001,28 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { find_best_match_for_name(names, &name, None) } - fn available_field_names(&self, variant: &'tcx ty::VariantDef) -> Vec { + fn available_field_names(&self, variant: &'tcx ty::VariantDef) -> Vec { let mut available = Vec::new(); for field in variant.fields.iter() { let (_, def_scope) = self.tcx.adjust(field.name, variant.did, self.body_id); if field.vis.is_accessible_from(def_scope, self.tcx) { - available.push(field.name.to_string()); + available.push(field.name); } } available } + fn name_series_display(&self, names: Vec) -> String { + // dynamic limit, to never omit just one field + let limit = if names.len() == 6 { 6 } else { 5 }; + let mut display = names.iter().take(limit) + .map(|n| format!("`{}`", n)).collect::>().join(", "); + if names.len() > limit { + display = format!("{} ... 
and {} others", display, names.len() - limit); + } + display + } + // Check tuple index expressions fn check_tup_field(&self, expr: &'gcx hir::Expr, @@ -3132,12 +3144,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { format!("`{}` does not have this field", ty)); } let available_field_names = self.available_field_names(variant); - err.note(&format!("available fields are: {}", - available_field_names.join(", "))); + if !available_field_names.is_empty() { + err.note(&format!("available fields are: {}", + self.name_series_display(available_field_names))); + } } _ => bug!("non-ADT passed to report_unknown_field") } - }; err.emit(); } diff --git a/src/test/compile-fail/E0559.rs b/src/test/compile-fail/E0559.rs index 21bb2dc7002c..e8b0915d2b53 100644 --- a/src/test/compile-fail/E0559.rs +++ b/src/test/compile-fail/E0559.rs @@ -16,5 +16,5 @@ fn main() { let s = Field::Fool { joke: 0 }; //~^ ERROR E0559 //~| NOTE `Field::Fool` does not have this field - //~| NOTE available fields are: x + //~| NOTE available fields are: `x` } diff --git a/src/test/compile-fail/E0560.rs b/src/test/compile-fail/E0560.rs index 7aa6b2e86d69..955ef7ca99ce 100644 --- a/src/test/compile-fail/E0560.rs +++ b/src/test/compile-fail/E0560.rs @@ -16,5 +16,5 @@ fn main() { let s = Simba { mother: 1, father: 0 }; //~^ ERROR E0560 //~| NOTE `Simba` does not have this field - //~| NOTE available fields are: mother + //~| NOTE available fields are: `mother` } diff --git a/src/test/compile-fail/issue-19922.rs b/src/test/compile-fail/issue-19922.rs index 429c4384117a..938ccb343d42 100644 --- a/src/test/compile-fail/issue-19922.rs +++ b/src/test/compile-fail/issue-19922.rs @@ -16,5 +16,5 @@ fn main() { let homura = Homura::Akemi { kaname: () }; //~^ ERROR variant `Homura::Akemi` has no field named `kaname` //~| NOTE `Homura::Akemi` does not have this field - //~| NOTE available fields are: madoka + //~| NOTE available fields are: `madoka` } diff --git a/src/test/compile-fail/numeric-fields.rs 
b/src/test/compile-fail/numeric-fields.rs index 242c3a3a33d2..d6e091a1472c 100644 --- a/src/test/compile-fail/numeric-fields.rs +++ b/src/test/compile-fail/numeric-fields.rs @@ -14,7 +14,7 @@ fn main() { let s = S{0b1: 10, 0: 11}; //~^ ERROR struct `S` has no field named `0b1` //~| NOTE `S` does not have this field - //~| NOTE available fields are: 0, 1 + //~| NOTE available fields are: `0`, `1` match s { S{0: a, 0x1: b, ..} => {} //~^ ERROR does not have a field named `0x1` diff --git a/src/test/compile-fail/struct-fields-too-many.rs b/src/test/compile-fail/struct-fields-too-many.rs index 78ab94d5fb4d..b1af142ad0fd 100644 --- a/src/test/compile-fail/struct-fields-too-many.rs +++ b/src/test/compile-fail/struct-fields-too-many.rs @@ -18,6 +18,6 @@ fn main() { bar: 0 //~^ ERROR struct `BuildData` has no field named `bar` //~| NOTE `BuildData` does not have this field - //~| NOTE available fields are: foo + //~| NOTE available fields are: `foo` }; } diff --git a/src/test/compile-fail/suggest-private-fields.rs b/src/test/compile-fail/suggest-private-fields.rs index 959932af9b1d..d0752b5f02f0 100644 --- a/src/test/compile-fail/suggest-private-fields.rs +++ b/src/test/compile-fail/suggest-private-fields.rs @@ -28,7 +28,7 @@ fn main () { bb: 20, //~^ ERROR struct `xc::B` has no field named `bb` //~| NOTE `xc::B` does not have this field - //~| NOTE available fields are: a + //~| NOTE available fields are: `a` }; // local crate struct let l = A { diff --git a/src/test/compile-fail/union/union-fields.rs b/src/test/compile-fail/union/union-fields.rs index 2bcc2204e334..124b16f99b1a 100644 --- a/src/test/compile-fail/union/union-fields.rs +++ b/src/test/compile-fail/union/union-fields.rs @@ -20,7 +20,7 @@ fn main() { let u = U { a: 0, b: 1, c: 2 }; //~ ERROR union expressions should have exactly one field //~^ ERROR union `U` has no field named `c` //~| NOTE `U` does not have this field - //~| NOTE available fields are: a, b + //~| NOTE available fields are: `a`, `b` let u = 
U { ..u }; //~ ERROR union expressions should have exactly one field //~^ ERROR functional record update syntax requires a struct diff --git a/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr b/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr index 610466c894aa..20bb7d4c91de 100644 --- a/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr +++ b/src/test/ui/did_you_mean/issue-36798_unknown_field.stderr @@ -4,7 +4,7 @@ error[E0609]: no field `zz` on type `Foo` 17 | f.zz; | ^^ unknown field | - = note: available fields are: bar + = note: available fields are: `bar` error: aborting due to previous error diff --git a/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs b/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs index 4b0cc7b96a76..7fe995080122 100644 --- a/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs +++ b/src/test/ui/did_you_mean/issue-42599_available_fields_note.rs @@ -14,7 +14,11 @@ mod submodule { pub struct Demo { pub favorite_integer: isize, secret_integer: isize, - pub innocently_misspellable: () + pub innocently_misspellable: (), + another_field: bool, + yet_another_field: bool, + always_more_fields: bool, + and_ever: bool, } impl Demo { @@ -34,6 +38,6 @@ fn main() { let demo = Demo::default(); let innocent_field_misaccess = demo.inocently_mispellable; - // note shouldn't suggest private `secret_integer` field + // note shouldn't suggest private fields let egregious_field_misaccess = demo.egregiously_nonexistent_field; } diff --git a/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr b/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr index 17edac92fd9d..e2bb7fbd9a89 100644 --- a/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr +++ b/src/test/ui/did_you_mean/issue-42599_available_fields_note.stderr @@ -1,30 +1,30 @@ error[E0560]: struct `submodule::Demo` has no field named `inocently_mispellable` - --> 
$DIR/issue-42599_available_fields_note.rs:22:39 + --> $DIR/issue-42599_available_fields_note.rs:26:39 | -22 | Self { secret_integer: 2, inocently_mispellable: () } +26 | Self { secret_integer: 2, inocently_mispellable: () } | ^^^^^^^^^^^^^^^^^^^^^^ field does not exist - did you mean `innocently_misspellable`? error[E0560]: struct `submodule::Demo` has no field named `egregiously_nonexistent_field` - --> $DIR/issue-42599_available_fields_note.rs:26:39 + --> $DIR/issue-42599_available_fields_note.rs:30:39 | -26 | Self { secret_integer: 3, egregiously_nonexistent_field: () } +30 | Self { secret_integer: 3, egregiously_nonexistent_field: () } | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `submodule::Demo` does not have this field | - = note: available fields are: favorite_integer, secret_integer, innocently_misspellable + = note: available fields are: `favorite_integer`, `secret_integer`, `innocently_misspellable`, `another_field`, `yet_another_field` ... and 2 others error[E0609]: no field `inocently_mispellable` on type `submodule::Demo` - --> $DIR/issue-42599_available_fields_note.rs:36:41 + --> $DIR/issue-42599_available_fields_note.rs:40:41 | -36 | let innocent_field_misaccess = demo.inocently_mispellable; +40 | let innocent_field_misaccess = demo.inocently_mispellable; | ^^^^^^^^^^^^^^^^^^^^^ did you mean `innocently_misspellable`? 
error[E0609]: no field `egregiously_nonexistent_field` on type `submodule::Demo` - --> $DIR/issue-42599_available_fields_note.rs:38:42 + --> $DIR/issue-42599_available_fields_note.rs:42:42 | -38 | let egregious_field_misaccess = demo.egregiously_nonexistent_field; +42 | let egregious_field_misaccess = demo.egregiously_nonexistent_field; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unknown field | - = note: available fields are: favorite_integer, innocently_misspellable + = note: available fields are: `favorite_integer`, `innocently_misspellable` error: aborting due to 4 previous errors From 584d823bf2c309cb7b40aadd9d55ecc75f7eb9fc Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 31 Jul 2017 19:51:10 -0700 Subject: [PATCH 123/213] Handle closures. Add some more tests. --- src/librustc_mir/transform/add_validation.rs | 61 +++++++++++++++----- src/test/mir-opt/validate_1.rs | 7 +++ src/test/mir-opt/validate_4.rs | 58 +++++++++++++++++++ src/test/mir-opt/validate_5.rs | 44 ++++++++++++++ 4 files changed, 156 insertions(+), 14 deletions(-) create mode 100644 src/test/mir-opt/validate_4.rs create mode 100644 src/test/mir-opt/validate_5.rs diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index a3ec6af76034..6f136624f0ac 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -87,6 +87,17 @@ fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> use rustc::hir::intravisit::{self, Visitor}; use rustc::hir::map::Node; + fn block_is_unsafe(block: &hir::Block) -> bool { + use rustc::hir::BlockCheckMode::*; + + match block.rules { + UnsafeBlock(_) | PushUnsafeBlock(_) => true, + // For PopUnsafeBlock, we don't actually know -- but we will always also check all + // parent blocks, so we can safely declare the PopUnsafeBlock to not be unsafe. 
+ DefaultBlock | PopUnsafeBlock(_) => false, + } + } + let fn_node_id = match src { MirSource::Fn(node_id) => node_id, _ => return false, // only functions can have unsafe @@ -101,8 +112,35 @@ fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> match tcx.hir.find(fn_node_id) { Some(Node::NodeItem(item)) => finder.visit_item(item), Some(Node::NodeImplItem(item)) => finder.visit_impl_item(item), + Some(Node::NodeExpr(item)) => { + // This is a closure. + // We also have to walk up the parents and check that there is no unsafe block + // there. + let mut cur = fn_node_id; + loop { + // Go further upwards. + let parent = tcx.hir.get_parent_node(cur); + if cur == parent { + break; + } + cur = parent; + // Check if this is a block + match tcx.hir.find(cur) { + Some(Node::NodeExpr(&hir::Expr { node: hir::ExprBlock(ref block), ..})) => { + if block_is_unsafe(&*block) { + // We can bail out here. + return true; + } + } + _ => {}, + } + } + // Finally, visit the closure itself. + finder.visit_expr(item); + } Some(_) | None => - bug!("Expected method or function, found {}", tcx.hir.node_to_string(fn_node_id)), + bug!("Expected function, method or closure, found {}", + tcx.hir.node_to_string(fn_node_id)), }; impl<'b, 'tcx> Visitor<'tcx> for FindUnsafe<'b, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'tcx> { @@ -113,7 +151,7 @@ fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> fn visit_fn(&mut self, fk: intravisit::FnKind<'tcx>, fd: &'tcx hir::FnDecl, b: hir::BodyId, s: Span, id: NodeId) { - assert!(!self.found_unsafe, "We should never see more than one fn"); + assert!(!self.found_unsafe, "We should never see a fn when we already saw unsafe"); let is_unsafe = match fk { intravisit::FnKind::ItemFn(_, _, unsafety, ..) => unsafety == hir::Unsafety::Unsafe, intravisit::FnKind::Method(_, sig, ..) 
=> sig.unsafety == hir::Unsafety::Unsafe, @@ -129,20 +167,15 @@ fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> } fn visit_block(&mut self, b: &'tcx hir::Block) { - use rustc::hir::BlockCheckMode::*; - if self.found_unsafe { return; } // short-circuit - match b.rules { - UnsafeBlock(_) | PushUnsafeBlock(_) => { - // We found an unsafe block. - self.found_unsafe = true; - } - DefaultBlock | PopUnsafeBlock(_) => { - // No unsafe block here, go on searching. - intravisit::walk_block(self, b); - } - }; + if block_is_unsafe(b) { + // We found an unsafe block. We can stop searching. + self.found_unsafe = true; + } else { + // No unsafe block here, go on searching. + intravisit::walk_block(self, b); + } } } diff --git a/src/test/mir-opt/validate_1.rs b/src/test/mir-opt/validate_1.rs index c8ea2bc25447..b85d9261e4a9 100644 --- a/src/test/mir-opt/validate_1.rs +++ b/src/test/mir-opt/validate_1.rs @@ -21,8 +21,15 @@ impl Test { fn main() { let mut x = 0; Test.foo(&mut x); + + // Also test closures + let c = |x: &mut i32| { let y = &*x; *y }; + c(&mut x); } +// FIXME: Also test code generated inside the closure, make sure it has validation. Unfortunately, +// the interesting lines of code also contain name of the source file, so we cannot test for it. + // END RUST SOURCE // START rustc.node10.EraseRegions.after.mir // bb0: { diff --git a/src/test/mir-opt/validate_4.rs b/src/test/mir-opt/validate_4.rs new file mode 100644 index 000000000000..49acaccd86ab --- /dev/null +++ b/src/test/mir-opt/validate_4.rs @@ -0,0 +1,58 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// ignore-tidy-linelength +// compile-flags: -Z verbose -Z mir-emit-validate=1 + +// Make sure unsafe fns and fns with an unsafe block only get restricted validation. + +unsafe fn write_42(x: *mut i32) -> bool { + *x = 42; + true +} + +fn test(x: &mut i32) { + unsafe { write_42(x) }; +} + +fn main() { + test(&mut 0); + + let test_closure = unsafe { |x: &mut i32| write_42(x) }; + test_closure(&mut 0); +} + +// FIXME: Also test code generated inside the closure, make sure it only does restricted validation +// because it is entirely inside an unsafe block. Unfortunately, the interesting lines of code also +// contain name of the source file, so we cannot test for it. + +// END RUST SOURCE +// START rustc.node4.EraseRegions.after.mir +// fn write_42(_1: *mut i32) -> bool { +// bb0: { +// Validate(Acquire, [_1: *mut i32]); +// Validate(Release, [_1: *mut i32]); +// return; +// } +// } +// END rustc.node4.EraseRegions.after.mir +// START rustc.node17.EraseRegions.after.mir +// fn test(_1: &ReErased mut i32) -> () { +// bb0: { +// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(4) => validate_4/8cd878b::test[0] }, BrAnon(0)) mut i32]); +// Validate(Release, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(4) => validate_4/8cd878b::test[0] }, BrAnon(0)) mut i32]); +// _3 = const write_42(_4) -> bb1; +// } +// bb1: { +// Validate(Acquire, [_3: bool]); +// Validate(Release, [_3: bool]); +// } +// } +// END rustc.node17.EraseRegions.after.mir diff --git a/src/test/mir-opt/validate_5.rs b/src/test/mir-opt/validate_5.rs new file mode 100644 index 000000000000..1831f9dd713f --- /dev/null +++ b/src/test/mir-opt/validate_5.rs @@ -0,0 +1,44 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +// ignore-tidy-linelength +// compile-flags: -Z verbose -Z mir-emit-validate=2 + +// Make sure unsafe fns and fns with an unsafe block only get full validation. + +unsafe fn write_42(x: *mut i32) -> bool { + *x = 42; + true +} + +fn test(x: &mut i32) { + unsafe { write_42(x) }; +} + +fn main() { + test(&mut 0); + + let test_closure = unsafe { |x: &mut i32| write_42(x) }; + test_closure(&mut 0); +} + +// FIXME: Also test code generated inside the closure, make sure it has validation. Unfortunately, +// the interesting lines of code also contain name of the source file, so we cannot test for it. + +// END RUST SOURCE +// START rustc.node17.EraseRegions.after.mir +// fn test(_1: &ReErased mut i32) -> () { +// bb0: { +// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(4) => validate_5/8cd878b::test[0] }, BrAnon(0)) mut i32]); +// Validate(Release, [_4: *mut i32]); +// _3 = const write_42(_4) -> bb1; +// } +// } +// END rustc.node17.EraseRegions.after.mir From 27b9182d5bc07d87a34c3a1ffda99d54ca2fec69 Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Tue, 1 Aug 2017 14:43:11 +1200 Subject: [PATCH 124/213] review changes --- src/Cargo.lock | 1 + src/librustc_save_analysis/Cargo.toml | 1 + src/librustc_save_analysis/dump_visitor.rs | 6 +++--- src/librustc_save_analysis/lib.rs | 1 + src/librustc_save_analysis/span_utils.rs | 9 +++++---- 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/Cargo.lock b/src/Cargo.lock index fba32d4e40af..ed8a83e9e6e1 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1549,6 +1549,7 @@ dependencies = [ "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_data_structures 0.0.0", "rustc_typeck 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", diff --git a/src/librustc_save_analysis/Cargo.toml 
b/src/librustc_save_analysis/Cargo.toml index 2a51bf9430e7..00b01994eb8b 100644 --- a/src/librustc_save_analysis/Cargo.toml +++ b/src/librustc_save_analysis/Cargo.toml @@ -11,6 +11,7 @@ crate-type = ["dylib"] [dependencies] log = "0.3" rustc = { path = "../librustc" } +rustc_data_structures = { path = "../librustc_data_structures" } rustc_typeck = { path = "../librustc_typeck" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs index ca27bd76fff7..f74e8cb21608 100644 --- a/src/librustc_save_analysis/dump_visitor.rs +++ b/src/librustc_save_analysis/dump_visitor.rs @@ -29,8 +29,8 @@ use rustc::hir::def_id::DefId; use rustc::hir::map::Node; use rustc::session::Session; use rustc::ty::{self, TyCtxt}; +use rustc_data_structures::fx::FxHashSet; -use std::collections::HashSet; use std::path::Path; use syntax::ast::{self, NodeId, PatKind, Attribute, CRATE_NODE_ID}; @@ -75,7 +75,7 @@ pub struct DumpVisitor<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> { // we only write one macro def per unique macro definition, and // one macro use per unique callsite span. 
// mac_defs: HashSet, - macro_calls: HashSet, + macro_calls: FxHashSet, } impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { @@ -91,7 +91,7 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { span: span_utils.clone(), cur_scope: CRATE_NODE_ID, // mac_defs: HashSet::new(), - macro_calls: HashSet::new(), + macro_calls: FxHashSet(), } } diff --git a/src/librustc_save_analysis/lib.rs b/src/librustc_save_analysis/lib.rs index c9489aac9810..1dd0df4108fc 100644 --- a/src/librustc_save_analysis/lib.rs +++ b/src/librustc_save_analysis/lib.rs @@ -23,6 +23,7 @@ #[macro_use] extern crate log; #[macro_use] extern crate syntax; +extern crate rustc_data_structures; extern crate rustc_serialize; extern crate rustc_typeck; extern crate syntax_pos; diff --git a/src/librustc_save_analysis/span_utils.rs b/src/librustc_save_analysis/span_utils.rs index 660fe7dfa3d9..e771da2ed4ce 100644 --- a/src/librustc_save_analysis/span_utils.rs +++ b/src/librustc_save_analysis/span_utils.rs @@ -398,9 +398,10 @@ impl<'a> SpanUtils<'a> { return false; } // If sub_span is none, filter out generated code. - if sub_span.is_none() { - return true; - } + let sub_span = match sub_span { + Some(ss) => ss, + None => return true, + }; //If the span comes from a fake filemap, filter it. if !self.sess.codemap().lookup_char_pos(parent.lo).file.is_real_file() { @@ -409,7 +410,7 @@ impl<'a> SpanUtils<'a> { // Otherwise, a generated span is deemed invalid if it is not a sub-span of the root // callsite. This filters out macro internal variables and most malformed spans. 
- !parent.source_callsite().contains(sub_span.unwrap()) + !parent.source_callsite().contains(sub_span) } } From ce0ca763808f4b5d153aaa2787ea253286b449ef Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Tue, 1 Aug 2017 11:57:26 +0300 Subject: [PATCH 125/213] pacify the merciless tidy --- src/librustc_mir/build/scope.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index bf39e52bd1b2..ccba87a4d26a 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -386,8 +386,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // If we are emitting a `drop` statement, we need to have the cached // diverge cleanup pads ready in case that drop panics. - let may_panic = - self.scopes[(len - scope_count)..].iter().any(|s| s.drops.iter().any(|s| s.kind.may_panic())); + let may_panic = self.scopes[(len - scope_count)..].iter() + .any(|s| s.drops.iter().any(|s| s.kind.may_panic())); if may_panic { self.diverge_cleanup(); } From c9d14a846f4e34d2cf0db89423a32428ad8e924f Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Tue, 1 Aug 2017 14:17:11 +0300 Subject: [PATCH 126/213] syntax: avoid loading the same source-file multiple times We already had a cache for file contents, but we read the source-file before testing the cache, causing obvious slowness, so this just avoids loading the source-file when the cache already has the contents. 
--- src/libsyntax/codemap.rs | 5 +++-- src/libsyntax_pos/lib.rs | 5 ++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs index b3d9cf9da36c..bfdcae7641dd 100644 --- a/src/libsyntax/codemap.rs +++ b/src/libsyntax/codemap.rs @@ -561,8 +561,9 @@ impl CodeMapper for CodeMap { sp } fn ensure_filemap_source_present(&self, file_map: Rc) -> bool { - let src = self.file_loader.read_file(Path::new(&file_map.name)).ok(); - return file_map.add_external_src(src) + file_map.add_external_src( + || self.file_loader.read_file(Path::new(&file_map.name)).ok() + ) } } diff --git a/src/libsyntax_pos/lib.rs b/src/libsyntax_pos/lib.rs index 3a701f91314b..7006f45455e3 100644 --- a/src/libsyntax_pos/lib.rs +++ b/src/libsyntax_pos/lib.rs @@ -618,8 +618,11 @@ impl FileMap { /// If the hash of the input doesn't match or no input is supplied via None, /// it is interpreted as an error and the corresponding enum variant is set. /// The return value signifies whether some kind of source is present. - pub fn add_external_src(&self, src: Option) -> bool { + pub fn add_external_src(&self, get_src: F) -> bool + where F: FnOnce() -> Option + { if *self.external_src.borrow() == ExternalSource::AbsentOk { + let src = get_src(); let mut external_src = self.external_src.borrow_mut(); if let Some(src) = src { let mut hasher: StableHasher = StableHasher::new(); From 70478ca5c83513beb91cce78ae57ade70849fca4 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Tue, 1 Aug 2017 14:44:20 +0300 Subject: [PATCH 127/213] rustc::hir::map::definitions - fix O(n^2) when disambiguating Instead of finding the next free disambiguator by incrementing it until you find a place, store the next available disambiguator in an hash-map. This avoids O(n^2) performance when lots of items have the same un-disambiguated `DefPathData` - e.g. all `use` items have `DefPathData::Misc`. 
--- src/librustc/hir/map/definitions.rs | 32 ++++++++++++++--------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/librustc/hir/map/definitions.rs b/src/librustc/hir/map/definitions.rs index 91bce64243e3..cdd5a6e3da7f 100644 --- a/src/librustc/hir/map/definitions.rs +++ b/src/librustc/hir/map/definitions.rs @@ -18,7 +18,7 @@ use hir; use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, DefIndexAddressSpace, CRATE_DEF_INDEX}; use ich::Fingerprint; -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::IndexVec; use rustc_data_structures::stable_hasher::StableHasher; use serialize::{Encodable, Decodable, Encoder, Decoder}; @@ -153,7 +153,7 @@ pub struct Definitions { pub(super) node_to_hir_id: IndexVec, macro_def_scopes: FxHashMap, expansions: FxHashMap, - keys_created: FxHashSet, + next_disambiguator: FxHashMap<(DefIndex, DefPathData), u32>, } // Unfortunately we have to provide a manual impl of Clone because of the @@ -170,7 +170,7 @@ impl Clone for Definitions { node_to_hir_id: self.node_to_hir_id.clone(), macro_def_scopes: self.macro_def_scopes.clone(), expansions: self.expansions.clone(), - keys_created: self.keys_created.clone(), + next_disambiguator: self.next_disambiguator.clone(), } } } @@ -402,7 +402,7 @@ impl Definitions { node_to_hir_id: IndexVec::new(), macro_def_scopes: FxHashMap(), expansions: FxHashMap(), - keys_created: FxHashSet(), + next_disambiguator: FxHashMap(), } } @@ -516,20 +516,20 @@ impl Definitions { // The root node must be created with create_root_def() assert!(data != DefPathData::CrateRoot); - // Find a unique DefKey. This basically means incrementing the disambiguator - // until we get no match. - let mut key = DefKey { - parent: Some(parent), - disambiguated_data: DisambiguatedDefPathData { - data, - disambiguator: 0 - } + // Find the next free disambiguator for this key. 
+ let disambiguator = { + let next_disamb = self.next_disambiguator.entry((parent, data.clone())).or_insert(0); + let disambiguator = *next_disamb; + *next_disamb = next_disamb.checked_add(1).expect("disambiguator overflow"); + disambiguator }; - while self.keys_created.contains(&key) { - key.disambiguated_data.disambiguator += 1; - } - self.keys_created.insert(key.clone()); + let key = DefKey { + parent: Some(parent), + disambiguated_data: DisambiguatedDefPathData { + data, disambiguator + } + }; let parent_hash = self.table.def_path_hash(parent); let def_path_hash = key.compute_stable_hash(parent_hash); From b8d441350b28a6325934584ae07e3437d5cf9ad3 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Tue, 1 Aug 2017 11:04:24 +0200 Subject: [PATCH 128/213] async-llvm(28): Make some error messages more informative. --- src/librustc_trans/back/write.rs | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 85860f0e33a3..0d5fe6c0ae95 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -1323,13 +1323,16 @@ fn start_executing_work(sess: &Session, if main_thread_worker_state == MainThreadWorkerState::Idle { if !queue_full_enough(work_items.len(), running, max_workers) { // The queue is not full enough, translate more items: - trans_worker_send.send(Message::TranslateItem).unwrap(); + if let Err(_) = trans_worker_send.send(Message::TranslateItem) { + panic!("Could not send Message::TranslateItem to main thread") + } main_thread_worker_state = MainThreadWorkerState::Translating; } else { // The queue is full enough to not let the worker // threads starve. Use the implicit Token to do some // LLVM work too. - let (item, _) = work_items.pop().unwrap(); + let (item, _) = work_items.pop() + .expect("queue empty - queue_full_enough() broken?"); let cgcx = CodegenContext { worker: get_worker_id(&mut free_worker_ids), .. 
cgcx.clone() @@ -1406,7 +1409,7 @@ fn start_executing_work(sess: &Session, let msg = &format!("failed to acquire jobserver token: {}", e); shared_emitter.fatal(msg); // Exit the coordinator thread - panic!() + panic!("{}", msg) } } } @@ -1475,7 +1478,7 @@ fn start_executing_work(sess: &Session, Message::Done { result: Err(()), worker_id: _ } => { shared_emitter.fatal("aborting due to worker thread panic"); // Exit the coordinator thread - panic!() + panic!("aborting due to worker thread panic") } Message::TranslateItem => { bug!("the coordinator should not receive translation requests") @@ -1493,9 +1496,12 @@ fn start_executing_work(sess: &Session, total_llvm_time); } + let compiled_metadata_module = compiled_metadata_module + .expect("Metadata module not compiled?"); + CompiledModules { modules: compiled_modules, - metadata_module: compiled_metadata_module.unwrap(), + metadata_module: compiled_metadata_module, allocator_module: compiled_allocator_module, } }); @@ -1506,6 +1512,7 @@ fn start_executing_work(sess: &Session, workers_running: usize, max_workers: usize) -> bool { // Tune me, plz. + items_in_queue > 0 && items_in_queue >= max_workers.saturating_sub(workers_running / 2) } @@ -1805,7 +1812,12 @@ pub struct OngoingCrateTranslation { impl OngoingCrateTranslation { pub fn join(self, sess: &Session) -> CrateTranslation { self.shared_emitter_main.check(sess, true); - let compiled_modules = self.future.join().unwrap(); + let compiled_modules = match self.future.join() { + Ok(compiled_modules) => compiled_modules, + Err(_) => { + sess.fatal("Error during translation/LLVM phase."); + } + }; sess.abort_if_errors(); From 6468cad977d4c81d30ba000633eaa43bc18591f9 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Tue, 1 Aug 2017 15:57:38 +0200 Subject: [PATCH 129/213] async-llvm(29): Adapt run-make/llvm-phase test case to LLVM module not being available in memory. 
--- src/test/run-make/llvm-phase/test.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/test/run-make/llvm-phase/test.rs b/src/test/run-make/llvm-phase/test.rs index a75dc7e57a9a..7a63871f19e3 100644 --- a/src/test/run-make/llvm-phase/test.rs +++ b/src/test/run-make/llvm-phase/test.rs @@ -54,11 +54,7 @@ impl<'a> CompilerCalls<'a> for JitCalls { state.session.abort_if_errors(); let trans = state.trans.unwrap(); assert_eq!(trans.modules.len(), 1); - let rs_llmod = match trans.modules[0].source { - ModuleSource::Preexisting(_) => unimplemented!(), - ModuleSource::Translated(llvm) => llvm.llmod, - }; - unsafe { rustc_llvm::LLVMDumpModule(rs_llmod) }; + println!("name of compiled module = {}", trans.modules[0].name); }); cc } From 71751db49150b89b917130a68db0d1baef43af33 Mon Sep 17 00:00:00 2001 From: QuietMisdreavus Date: Fri, 28 Jul 2017 12:55:30 -0500 Subject: [PATCH 130/213] add documentation for function pointers as a primitive --- src/librustdoc/clean/mod.rs | 5 ++ src/librustdoc/html/format.rs | 8 +-- src/libstd/primitive_docs.rs | 101 ++++++++++++++++++++++++++++++++++ 3 files changed, 109 insertions(+), 5 deletions(-) diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 39258dd3a246..a9636c7e2fd7 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -1548,6 +1548,7 @@ pub enum PrimitiveType { Tuple, RawPointer, Reference, + Fn, } #[derive(Clone, RustcEncodable, RustcDecodable, Copy, Debug)] @@ -1583,6 +1584,7 @@ impl Type { Tuple(..) => Some(PrimitiveType::Tuple), RawPointer(..) => Some(PrimitiveType::RawPointer), BorrowedRef { type_: box Generic(..), .. } => Some(PrimitiveType::Reference), + BareFunction(..) 
=> Some(PrimitiveType::Fn), _ => None, } } @@ -1636,6 +1638,7 @@ impl PrimitiveType { "tuple" => Some(PrimitiveType::Tuple), "pointer" => Some(PrimitiveType::RawPointer), "reference" => Some(PrimitiveType::Reference), + "fn" => Some(PrimitiveType::Fn), _ => None, } } @@ -1665,6 +1668,7 @@ impl PrimitiveType { Tuple => "tuple", RawPointer => "pointer", Reference => "reference", + Fn => "fn", } } @@ -2561,6 +2565,7 @@ fn build_deref_target_impls(cx: &DocContext, Tuple => None, RawPointer => tcx.lang_items.const_ptr_impl(), Reference => None, + Fn => None, }; if let Some(did) = did { if !did.is_local() { diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index 33ab5cf47de2..988890ffedcd 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -607,11 +607,9 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: decl.generics, decl.decl) } else { - write!(f, "{}{}fn{}{}", - UnsafetySpace(decl.unsafety), - AbiSpace(decl.abi), - decl.generics, - decl.decl) + write!(f, "{}{}", UnsafetySpace(decl.unsafety), AbiSpace(decl.abi))?; + primitive_link(f, PrimitiveType::Fn, "fn")?; + write!(f, "{}{}", decl.generics, decl.decl) } } clean::Tuple(ref typs) => { diff --git a/src/libstd/primitive_docs.rs b/src/libstd/primitive_docs.rs index 84dba274a2e7..7be319d1954e 100644 --- a/src/libstd/primitive_docs.rs +++ b/src/libstd/primitive_docs.rs @@ -839,3 +839,104 @@ mod prim_usize { } /// locally known. #[stable(feature = "rust1", since = "1.0.0")] mod prim_ref { } + +#[doc(primitive = "fn")] +// +/// Function pointers, like `fn(usize) -> bool`. 
+/// +/// *See also the traits [`Fn`], [`FnMut`], and [`FnOnce`].* +/// +/// [`Fn`]: ops/trait.Fn.html +/// [`FnMut`]: ops/trait.FnMut.html +/// [`FnOnce`]: ops/trait.FnOnce.html +/// +/// Plain function pointers are obtained by casting either plain functions, or closures that don't +/// capture an environment: +/// +/// ``` +/// fn add_one(x: usize) -> usize { +/// x + 1 +/// } +/// +/// let ptr: fn(usize) -> usize = add_one; +/// assert_eq!(ptr(5), 6); +/// +/// let clos: fn(usize) -> usize = |x| x + 5; +/// assert_eq!(clos(5), 10); +/// ``` +/// +/// In addition to varying based on their signature, function pointers come in two flavors: safe +/// and unsafe. Plain `fn()` function pointers can only point to safe functions, +/// while `unsafe fn()` function pointers can point to safe or unsafe functions. +/// +/// ``` +/// fn add_one(x: usize) -> usize { +/// x + 1 +/// } +/// +/// unsafe fn add_one_unsafely(x: usize) -> usize { +/// x + 1 +/// } +/// +/// let safe_ptr: fn(usize) -> usize = add_one; +/// +/// //ERROR: mismatched types: expected normal fn, found unsafe fn +/// //let bad_ptr: fn(usize) -> usize = add_one_unsafely; +/// +/// let unsafe_ptr: unsafe fn(usize) -> usize = add_one_unsafely; +/// let really_safe_ptr: unsafe fn(usize) -> usize = add_one; +/// ``` +/// +/// On top of that, function pointers can vary based on what ABI they use. This is achieved by +/// adding the `extern` keyword to the type name, followed by the ABI in question. For example, +/// `fn()` is different from `extern "C" fn()`, which itself is different from `extern "stdcall" +/// fn()`, and so on for the various ABIs that Rust supports. Non-`extern` functions have an ABI +/// of `"Rust"`, and `extern` functions without an explicit ABI have an ABI of `"C"`. For more +/// information, see [the nomicon's section on foreign calling conventions][nomicon-abi]. 
+/// +/// [nomicon-abi]: ../nomicon/ffi.html#foreign-calling-conventions +/// +/// Extern function declarations with the "C" or "cdecl" ABIs can also be *variadic*, allowing them +/// to be called with a variable number of arguments. Normal rust functions, even those with an +/// `extern "ABI"`, cannot be variadic. For more information, see [the nomicon's section on +/// variadic functions][nomicon-variadic]. +/// +/// [nomicon-variadic]: ../nomicon/ffi.html#variadic-functions +/// +/// These markers can be combined, so `unsafe extern "stdcall" fn()` is a valid type. +/// +/// Like references in rust, function pointers are assumed to not be null, so if you want to pass a +/// function pointer over FFI and be able to accomodate null pointers, make your type +/// `Option` with your required signature. +/// +/// Function pointers implement the following traits: +/// +/// * [`Clone`] +/// * [`PartialEq`] +/// * [`Eq`] +/// * [`PartialOrd`] +/// * [`Ord`] +/// * [`Hash`] +/// * [`Pointer`] +/// * [`Debug`] +/// +/// [`Clone`]: clone/trait.Clone.html +/// [`PartialEq`]: cmp/trait.PartialEq.html +/// [`Eq`]: cmp/trait.Eq.html +/// [`PartialOrd`]: cmp/trait.PartialOrd.html +/// [`Ord`]: cmp/trait.Ord.html +/// [`Hash`]: hash/trait.Hash.html +/// [`Pointer`]: fmt/trait.Pointer.html +/// [`Debug`]: fmt/trait.Debug.html +/// +/// Due to a temporary restriction in Rust's type system, these traits are only implemented on +/// functions that take 12 arguments or less, with the `"Rust"` and `"C"` ABIs. In the future, this +/// may change. +/// +/// In addition, function pointers of *any* signature, ABI, or safety are [`Copy`], and all *safe* +/// function pointers implement [`Fn`], [`FnMut`], and [`FnOnce`]. This works because these traits +/// are specially known to the compiler. 
+/// +/// [`Copy`]: marker/trait.Copy.html +#[stable(feature = "rust1", since = "1.0.0")] +mod prim_fn { } From 6bb0693fdebb68020911f3b9245cebc6f134154e Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Tue, 1 Aug 2017 16:34:20 +0200 Subject: [PATCH 131/213] incr.comp.: Assert that no DepNode is re-opened (see issue #42298). --- src/librustc/dep_graph/dep_node.rs | 1 + src/librustc/dep_graph/edges.rs | 10 ++++++++++ src/librustc/ty/maps.rs | 6 +++++- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index 8e2c44a427b7..800689f4638d 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -394,6 +394,7 @@ define_dep_nodes!( <'tcx> // Represents different phases in the compiler. [] RegionMaps(DefId), [] Coherence, + [] CoherenceInherentImplOverlapCheck, [] Resolve, [] CoherenceCheckTrait(DefId), [] PrivacyAccessLevels(CrateNum), diff --git a/src/librustc/dep_graph/edges.rs b/src/librustc/dep_graph/edges.rs index 277b69262c92..9aa634770df9 100644 --- a/src/librustc/dep_graph/edges.rs +++ b/src/librustc/dep_graph/edges.rs @@ -23,6 +23,11 @@ pub struct DepGraphEdges { edges: FxHashSet<(DepNodeIndex, DepNodeIndex)>, task_stack: Vec, forbidden_edge: Option, + + // A set to help assert that no two tasks use the same DepNode. This is a + // temporary measure. Once we load the previous dep-graph as readonly, this + // check will fall out of the graph implementation naturally. 
+ opened_once: FxHashSet, } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] @@ -80,6 +85,7 @@ impl DepGraphEdges { edges: FxHashSet(), task_stack: Vec::new(), forbidden_edge, + opened_once: FxHashSet(), } } @@ -97,6 +103,10 @@ impl DepGraphEdges { } pub fn push_task(&mut self, key: DepNode) { + if !self.opened_once.insert(key) { + bug!("Re-opened node {:?}", key) + } + self.task_stack.push(OpenTask::Regular { node: key, reads: Vec::new(), diff --git a/src/librustc/ty/maps.rs b/src/librustc/ty/maps.rs index 7a45a706ea40..d62d8f986c23 100644 --- a/src/librustc/ty/maps.rs +++ b/src/librustc/ty/maps.rs @@ -931,7 +931,7 @@ define_maps! { <'tcx> /// Checks all types in the krate for overlap in their inherent impls. Reports errors. /// Not meant to be used directly outside of coherence. /// (Defined only for LOCAL_CRATE) - [] crate_inherent_impls_overlap_check: crate_inherent_impls_dep_node(CrateNum) -> (), + [] crate_inherent_impls_overlap_check: inherent_impls_overlap_check_dep_node(CrateNum) -> (), /// Results of evaluating const items or constants embedded in /// other items (such as enum variant explicit discriminants). 
@@ -1014,6 +1014,10 @@ fn crate_inherent_impls_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { DepConstructor::Coherence } +fn inherent_impls_overlap_check_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::CoherenceInherentImplOverlapCheck +} + fn reachability_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { DepConstructor::Reachability } From 4310edb4cb106e725bb63e05b2cba21d6bb2a85f Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 1 Aug 2017 08:48:28 -0700 Subject: [PATCH 132/213] handle tuple struct ctors --- src/librustc_mir/transform/add_validation.rs | 6 ++++- src/test/mir-opt/validate_1.rs | 24 ++++++++++---------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 6f136624f0ac..578a63e44b03 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -128,7 +128,7 @@ fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> match tcx.hir.find(cur) { Some(Node::NodeExpr(&hir::Expr { node: hir::ExprBlock(ref block), ..})) => { if block_is_unsafe(&*block) { - // We can bail out here. + // Found an unsafe block, we can bail out here. return true; } } @@ -138,6 +138,10 @@ fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> // Finally, visit the closure itself. finder.visit_expr(item); } + Some(Node::NodeStructCtor(_)) => { + // Auto-generated tuple struct ctor. Cannot contain unsafe code. 
+ return false; + }, Some(_) | None => bug!("Expected function, method or closure, found {}", tcx.hir.node_to_string(fn_node_id)), diff --git a/src/test/mir-opt/validate_1.rs b/src/test/mir-opt/validate_1.rs index b85d9261e4a9..542ba87fef4b 100644 --- a/src/test/mir-opt/validate_1.rs +++ b/src/test/mir-opt/validate_1.rs @@ -11,7 +11,7 @@ // ignore-tidy-linelength // compile-flags: -Z verbose -Z mir-emit-validate=1 -struct Test; +struct Test(i32); impl Test { // Make sure we run the pass on a method, not just on bare functions. @@ -20,7 +20,7 @@ impl Test { fn main() { let mut x = 0; - Test.foo(&mut x); + Test(0).foo(&mut x); // Also test closures let c = |x: &mut i32| { let y = &*x; *y }; @@ -31,29 +31,29 @@ fn main() { // the interesting lines of code also contain name of the source file, so we cannot test for it. // END RUST SOURCE -// START rustc.node10.EraseRegions.after.mir +// START rustc.node12.EraseRegions.after.mir // bb0: { // Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(5) => validate_1/8cd878b::{{impl}}[0]::foo[0] }, BrAnon(0)) Test, _2: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(5) => validate_1/8cd878b::{{impl}}[0]::foo[0] }, BrAnon(1)) mut i32]); // return; // } -// END rustc.node10.EraseRegions.after.mir -// START rustc.node21.EraseRegions.after.mir +// END rustc.node12.EraseRegions.after.mir +// START rustc.node23.EraseRegions.after.mir // fn main() -> () { // bb0: { -// Validate(Suspend(ReScope(Misc(NodeId(30)))), [_1: i32]); +// Validate(Suspend(ReScope(Misc(NodeId(34)))), [_1: i32]); // _6 = &ReErased mut _1; -// Validate(Acquire, [(*_6): i32/ReScope(Misc(NodeId(30)))]); -// Validate(Suspend(ReScope(Misc(NodeId(30)))), [(*_6): i32/ReScope(Misc(NodeId(30)))]); +// Validate(Acquire, [(*_6): i32/ReScope(Misc(NodeId(34)))]); +// Validate(Suspend(ReScope(Misc(NodeId(34)))), [(*_6): i32/ReScope(Misc(NodeId(34)))]); // _5 = &ReErased mut (*_6); -// Validate(Acquire, [(*_5): i32/ReScope(Misc(NodeId(30)))]); -// 
Validate(Release, [_3: &ReScope(Misc(NodeId(30))) Test, _5: &ReScope(Misc(NodeId(30))) mut i32]); +// Validate(Acquire, [(*_5): i32/ReScope(Misc(NodeId(34)))]); +// Validate(Release, [_3: &ReScope(Misc(NodeId(34))) Test, _5: &ReScope(Misc(NodeId(34))) mut i32]); // _2 = const Test::foo(_3, _5) -> bb1; // } // // bb1: { // Validate(Acquire, [_2: ()]); -// EndRegion(ReScope(Misc(NodeId(30)))); +// EndRegion(ReScope(Misc(NodeId(34)))); // return; // } // } -// END rustc.node21.EraseRegions.after.mir +// END rustc.node23.EraseRegions.after.mir From 8f910bcbbcbd47b372670e0a347fcde340d25e5e Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 1 Aug 2017 09:22:58 -0700 Subject: [PATCH 133/213] handle trait items as well --- src/librustc_mir/transform/add_validation.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 578a63e44b03..374b658dfc7f 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -112,6 +112,7 @@ fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> match tcx.hir.find(fn_node_id) { Some(Node::NodeItem(item)) => finder.visit_item(item), Some(Node::NodeImplItem(item)) => finder.visit_impl_item(item), + Some(Node::NodeTraitItem(item)) => finder.visit_trait_item(item), Some(Node::NodeExpr(item)) => { // This is a closure. // We also have to walk up the parents and check that there is no unsafe block From bdb53e55b0d30ad4f5438eff74d0b705f8675d98 Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Tue, 1 Aug 2017 12:38:36 -0700 Subject: [PATCH 134/213] Fix the Solaris pthread_t raw type in std to match what's in libc The old type causes failures when building cargo 0.20.0 after changeset 8304e06b5 in the libc repo. 
--- src/libstd/os/solaris/raw.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstd/os/solaris/raw.rs b/src/libstd/os/solaris/raw.rs index b84fdba9ca25..5a813c5c76bc 100644 --- a/src/libstd/os/solaris/raw.rs +++ b/src/libstd/os/solaris/raw.rs @@ -32,7 +32,7 @@ use os::unix::raw::{uid_t, gid_t}; #[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i64; #[stable(feature = "pthread_t", since = "1.8.0")] -pub type pthread_t = usize; +pub type pthread_t = u32; #[repr(C)] #[derive(Clone)] From c5154d036d09b5274a9e7bcf9f343520728d4c07 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 1 Aug 2017 13:07:45 -0700 Subject: [PATCH 135/213] use FnLike to recognize functions for us --- src/librustc/hir/map/blocks.rs | 12 ++ src/librustc_mir/transform/add_validation.rs | 125 +++++++++---------- 2 files changed, 73 insertions(+), 64 deletions(-) diff --git a/src/librustc/hir/map/blocks.rs b/src/librustc/hir/map/blocks.rs index 661798a82505..1b7eb1585671 100644 --- a/src/librustc/hir/map/blocks.rs +++ b/src/librustc/hir/map/blocks.rs @@ -192,6 +192,18 @@ impl<'a> FnLikeNode<'a> { } } + pub fn unsafety(self) -> ast::Unsafety { + match self.kind() { + FnKind::ItemFn(_, _, unsafety, ..) => { + unsafety + } + FnKind::Method(_, m, ..) => { + m.unsafety + } + _ => ast::Unsafety::Normal + } + } + pub fn kind(self) -> FnKind<'a> { let item = |p: ItemFnParts<'a>| -> FnKind<'a> { FnKind::ItemFn(p.name, p.generics, p.unsafety, p.constness, p.abi, p.vis, p.attrs) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 374b658dfc7f..2afaa0701181 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -14,8 +14,6 @@ //! of MIR building, and only after this pass we think of the program has having the //! normal MIR semantics. 
-use syntax_pos::Span; -use syntax::ast::NodeId; use rustc::ty::{self, TyCtxt, RegionKind}; use rustc::hir; use rustc::mir::*; @@ -84,9 +82,11 @@ fn lval_context<'a, 'tcx, D>( /// Check if this function contains an unsafe block or is an unsafe function. fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> bool { - use rustc::hir::intravisit::{self, Visitor}; + use rustc::hir::intravisit::{self, Visitor, FnKind}; + use rustc::hir::map::blocks::FnLikeNode; use rustc::hir::map::Node; + /// Decide if this is an unsafe block fn block_is_unsafe(block: &hir::Block) -> bool { use rustc::hir::BlockCheckMode::*; @@ -98,77 +98,74 @@ fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> } } - let fn_node_id = match src { - MirSource::Fn(node_id) => node_id, + /// Decide if this FnLike is a closure + fn fn_is_closure<'a>(fn_like: FnLikeNode<'a>) -> bool { + match fn_like.kind() { + FnKind::Closure(_) => true, + FnKind::Method(..) | FnKind::ItemFn(..) => false, + } + } + + let fn_like = match src { + MirSource::Fn(node_id) => { + match FnLikeNode::from_node(tcx.hir.get(node_id)) { + Some(fn_like) => fn_like, + None => return false, // e.g. struct ctor shims -- such auto-generated code cannot + // contain unsafe. + } + }, _ => return false, // only functions can have unsafe }; - struct FindUnsafe<'b, 'tcx> where 'tcx : 'b { - map: &'b hir::map::Map<'tcx>, - found_unsafe: bool, + // Test if the function is marked unsafe. + if fn_like.unsafety() == hir::Unsafety::Unsafe { + return true; } - let mut finder = FindUnsafe { map: &tcx.hir, found_unsafe: false }; - // Run the visitor on the NodeId we got. Seems like there is no uniform way to do that. - match tcx.hir.find(fn_node_id) { - Some(Node::NodeItem(item)) => finder.visit_item(item), - Some(Node::NodeImplItem(item)) => finder.visit_impl_item(item), - Some(Node::NodeTraitItem(item)) => finder.visit_trait_item(item), - Some(Node::NodeExpr(item)) => { - // This is a closure. 
- // We also have to walk up the parents and check that there is no unsafe block - // there. - let mut cur = fn_node_id; - loop { - // Go further upwards. - let parent = tcx.hir.get_parent_node(cur); - if cur == parent { + + // For closures, we need to walk up the parents and see if we are inside an unsafe fn or + // unsafe block. + if fn_is_closure(fn_like) { + let mut cur = fn_like.id(); + loop { + // Go further upwards. + let parent = tcx.hir.get_parent_node(cur); + if cur == parent { + bug!("Closures muts be inside a non-closure fn_like"); + } + cur = parent; + // Check if this is an unsafe block + match tcx.hir.find(cur) { + Some(Node::NodeExpr(&hir::Expr { node: hir::ExprBlock(ref block), ..})) => { + if block_is_unsafe(&*block) { + // Found an unsafe block, we can bail out here. + return true; + } + } + _ => {}, + } + // Check if this is a non-closure fn_like, at which point we have to stop moving up + if let Some(fn_like) = FnLikeNode::from_node(tcx.hir.get(cur)) { + if !fn_is_closure(fn_like) { + if fn_like.unsafety() == hir::Unsafety::Unsafe { + return true; + } break; } - cur = parent; - // Check if this is a a block - match tcx.hir.find(cur) { - Some(Node::NodeExpr(&hir::Expr { node: hir::ExprBlock(ref block), ..})) => { - if block_is_unsafe(&*block) { - // Found an unsafe block, we can bail out here. - return true; - } - } - _ => {}, - } } - // Finally, visit the closure itself. - finder.visit_expr(item); } - Some(Node::NodeStructCtor(_)) => { - // Auto-generated tuple struct ctor. Cannot contain unsafe code. - return false; - }, - Some(_) | None => - bug!("Expected function, method or closure, found {}", - tcx.hir.node_to_string(fn_node_id)), - }; + } - impl<'b, 'tcx> Visitor<'tcx> for FindUnsafe<'b, 'tcx> { + // Visit the entire body of the function and check for unsafe blocks in there + struct FindUnsafe { + found_unsafe: bool, + } + let mut finder = FindUnsafe { found_unsafe: false }; + // Run the visitor on the NodeId we got. 
Seems like there is no uniform way to do that. + finder.visit_body(tcx.hir.body(fn_like.body())); + + impl<'tcx> Visitor<'tcx> for FindUnsafe { fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'tcx> { - intravisit::NestedVisitorMap::OnlyBodies(self.map) - } - - fn visit_fn(&mut self, fk: intravisit::FnKind<'tcx>, fd: &'tcx hir::FnDecl, - b: hir::BodyId, s: Span, id: NodeId) - { - assert!(!self.found_unsafe, "We should never see a fn when we already saw unsafe"); - let is_unsafe = match fk { - intravisit::FnKind::ItemFn(_, _, unsafety, ..) => unsafety == hir::Unsafety::Unsafe, - intravisit::FnKind::Method(_, sig, ..) => sig.unsafety == hir::Unsafety::Unsafe, - intravisit::FnKind::Closure(_) => false, - }; - if is_unsafe { - // This is unsafe, and we are done. - self.found_unsafe = true; - } else { - // Go on searching. - intravisit::walk_fn(self, fk, fd, b, s, id) - } + intravisit::NestedVisitorMap::None } fn visit_block(&mut self, b: &'tcx hir::Block) { From a8129d128c314975d4d34a47e9cb7127de0d0dbc Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 1 Aug 2017 13:14:32 -0700 Subject: [PATCH 136/213] add a closure inside an unsafe fn to the tests --- src/test/mir-opt/validate_4.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/test/mir-opt/validate_4.rs b/src/test/mir-opt/validate_4.rs index 49acaccd86ab..591de975740f 100644 --- a/src/test/mir-opt/validate_4.rs +++ b/src/test/mir-opt/validate_4.rs @@ -14,6 +14,8 @@ // Make sure unsafe fns and fns with an unsafe block only get restricted validation. 
unsafe fn write_42(x: *mut i32) -> bool { + let test_closure = |x: *mut i32| *x = 23; + test_closure(x); *x = 42; true } @@ -43,7 +45,7 @@ fn main() { // } // } // END rustc.node4.EraseRegions.after.mir -// START rustc.node17.EraseRegions.after.mir +// START rustc.node31.EraseRegions.after.mir // fn test(_1: &ReErased mut i32) -> () { // bb0: { // Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(4) => validate_4/8cd878b::test[0] }, BrAnon(0)) mut i32]); @@ -55,4 +57,4 @@ fn main() { // Validate(Release, [_3: bool]); // } // } -// END rustc.node17.EraseRegions.after.mir +// END rustc.node31.EraseRegions.after.mir From 1b831cf54e441aa0c5d20a344f4c9f75d01d2538 Mon Sep 17 00:00:00 2001 From: Inokentiy Babushkin Date: Tue, 1 Aug 2017 22:27:30 +0200 Subject: [PATCH 137/213] Derive `Hash` on `AssociatedKind`. This is a trivial change useful in downstream code poking in rustc's innards. --- src/librustc/ty/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index e0b0aca1261b..f245b1503dab 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -174,7 +174,7 @@ pub struct AssociatedItem { pub method_has_self_argument: bool, } -#[derive(Copy, Clone, PartialEq, Eq, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, RustcEncodable, RustcDecodable)] pub enum AssociatedKind { Const, Method, From c3603f3ec669f8370fbe4bf98032527ee8f1c489 Mon Sep 17 00:00:00 2001 From: Florian Zeitz Date: Wed, 2 Aug 2017 00:32:14 +0200 Subject: [PATCH 138/213] trans: Check LLVM type instead of Layout --- src/librustc_trans/mir/rvalue.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 2cae2150885a..0485054a12ae 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef}; use 
rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{self, Layout, LayoutTyper, Primitive}; +use rustc::ty::layout::{Layout, LayoutTyper}; use rustc::mir::tcx::LvalueTy; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; @@ -107,6 +107,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let align = dest.alignment.to_align(); if let OperandValue::Immediate(v) = tr_elem.val { + // Use llvm.memset.p0i8.* to initialize all zero arrays if common::is_const_integral(v) && common::const_to_uint(v) == 0 { let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); let align = C_i32(bcx.ccx, align as i32); @@ -116,20 +117,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { base::call_memset(&bcx, base, fill, size, align, false); return bcx; } - } - // Use llvm.memset.p0i8.* to initialize byte arrays - let elem_layout = bcx.ccx.layout_of(tr_elem.ty).layout; - match *elem_layout { - Layout::Scalar { value: Primitive::Int(layout::I8), .. } | - Layout::CEnum { discr: layout::I8, .. } => { + // Use llvm.memset.p0i8.* to initialize byte arrays + if common::val_ty(v) == Type::i8(bcx.ccx) { let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); let align = C_i32(bcx.ccx, align as i32); let fill = tr_elem.immediate(); base::call_memset(&bcx, base, fill, size, align, false); return bcx; } - _ => () } tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| { From 881062776afe4575f3c9d6534f688a70cf34a7db Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Tue, 1 Aug 2017 07:41:06 -0400 Subject: [PATCH 139/213] Add doc example for HashSet::hasher. 
--- src/libstd/collections/hash/set.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index d80df5f18b61..040595bbb042 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -215,6 +215,17 @@ impl HashSet /// Returns a reference to the set's [`BuildHasher`]. /// /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use std::collections::HashSet; + /// use std::collections::hash_map::RandomState; + /// + /// let hasher = RandomState::new(); + /// let set: HashSet = HashSet::with_hasher(hasher); + /// let hasher: &RandomState = set.hasher(); + /// ``` #[stable(feature = "hashmap_public_hasher", since = "1.9.0")] pub fn hasher(&self) -> &S { self.map.hasher() From 9e192602860c897974e81c0c93b9c7293ce0fd3e Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Tue, 1 Aug 2017 07:42:59 -0400 Subject: [PATCH 140/213] Show that the capacity changed in HashSet::reserve doc example. --- src/libstd/collections/hash/set.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index 040595bbb042..0e65a30f3b2a 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -260,6 +260,7 @@ impl HashSet /// use std::collections::HashSet; /// let mut set: HashSet = HashSet::new(); /// set.reserve(10); + /// assert!(set.capacity() >= 10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn reserve(&mut self, additional: usize) { From 070eb3c66775789fbfe1607d3d6ef643c9afe3db Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Tue, 1 Aug 2017 07:44:43 -0400 Subject: [PATCH 141/213] Indicate HashSet is code-like in docs. 
--- src/libstd/collections/hash/set.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index 0e65a30f3b2a..ff19a0a1f149 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -123,7 +123,7 @@ pub struct HashSet { } impl HashSet { - /// Creates an empty HashSet. + /// Creates an empty `HashSet`. /// /// # Examples /// From 9e2b0c6390526e46f7bd217e13c281f7895bd1d9 Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Tue, 1 Aug 2017 07:46:41 -0400 Subject: [PATCH 142/213] Remove unnecessary 'mut' bindings. --- src/libstd/collections/hash/set.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index ff19a0a1f149..f8a316657140 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -129,7 +129,7 @@ impl HashSet { /// /// ``` /// use std::collections::HashSet; - /// let mut set: HashSet = HashSet::new(); + /// let set: HashSet = HashSet::new(); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -146,7 +146,7 @@ impl HashSet { /// /// ``` /// use std::collections::HashSet; - /// let mut set: HashSet = HashSet::with_capacity(10); + /// let set: HashSet = HashSet::with_capacity(10); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] From 1599fad5b485cbfbed698df3590665f064999948 Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Tue, 1 Aug 2017 07:47:17 -0400 Subject: [PATCH 143/213] Show the capacity in HashSet::with_capacity doc example. 
--- src/libstd/collections/hash/set.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index f8a316657140..a02674ed109a 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -147,6 +147,7 @@ impl HashSet { /// ``` /// use std::collections::HashSet; /// let set: HashSet = HashSet::with_capacity(10); + /// assert!(set.capacity() >= 10); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] From 34c1bfb0e142587bbbede848b69b2d498b8ede34 Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Tue, 1 Aug 2017 07:55:44 -0400 Subject: [PATCH 144/213] Remove unnecessary clones in doc examples. --- src/libstd/collections/hash/set.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index a02674ed109a..3c39db3fbabb 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -325,13 +325,13 @@ impl HashSet /// println!("{}", x); // Print 1 /// } /// - /// let diff: HashSet<_> = a.difference(&b).cloned().collect(); - /// assert_eq!(diff, [1].iter().cloned().collect()); + /// let diff: HashSet<_> = a.difference(&b).collect(); + /// assert_eq!(diff, [1].iter().collect()); /// /// // Note that difference is not symmetric, /// // and `b - a` means something else: - /// let diff: HashSet<_> = b.difference(&a).cloned().collect(); - /// assert_eq!(diff, [4].iter().cloned().collect()); + /// let diff: HashSet<_> = b.difference(&a).collect(); + /// assert_eq!(diff, [4].iter().collect()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn difference<'a>(&'a self, other: &'a HashSet) -> Difference<'a, T, S> { @@ -356,11 +356,11 @@ impl HashSet /// println!("{}", x); /// } /// - /// let diff1: HashSet<_> = a.symmetric_difference(&b).cloned().collect(); - /// let diff2: HashSet<_> = 
b.symmetric_difference(&a).cloned().collect(); + /// let diff1: HashSet<_> = a.symmetric_difference(&b).collect(); + /// let diff2: HashSet<_> = b.symmetric_difference(&a).collect(); /// /// assert_eq!(diff1, diff2); - /// assert_eq!(diff1, [1, 4].iter().cloned().collect()); + /// assert_eq!(diff1, [1, 4].iter().collect()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn symmetric_difference<'a>(&'a self, @@ -384,8 +384,8 @@ impl HashSet /// println!("{}", x); /// } /// - /// let intersection: HashSet<_> = a.intersection(&b).cloned().collect(); - /// assert_eq!(intersection, [2, 3].iter().cloned().collect()); + /// let intersection: HashSet<_> = a.intersection(&b).collect(); + /// assert_eq!(intersection, [2, 3].iter().collect()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn intersection<'a>(&'a self, other: &'a HashSet) -> Intersection<'a, T, S> { @@ -410,8 +410,8 @@ impl HashSet /// println!("{}", x); /// } /// - /// let union: HashSet<_> = a.union(&b).cloned().collect(); - /// assert_eq!(union, [1, 2, 3, 4].iter().cloned().collect()); + /// let union: HashSet<_> = a.union(&b).collect(); + /// assert_eq!(union, [1, 2, 3, 4].iter().collect()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn union<'a>(&'a self, other: &'a HashSet) -> Union<'a, T, S> { From d9df2963ad40b67aecde95cbfe98599a45351352 Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Tue, 1 Aug 2017 08:12:01 -0400 Subject: [PATCH 145/213] Add doc example for HashSet::drain. --- src/libstd/collections/hash/set.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs index 3c39db3fbabb..80a223c7d74e 100644 --- a/src/libstd/collections/hash/set.rs +++ b/src/libstd/collections/hash/set.rs @@ -453,6 +453,22 @@ impl HashSet } /// Clears the set, returning all elements in an iterator. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::HashSet; + /// + /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// assert!(!set.is_empty()); + /// + /// // print 1, 2, 3 in an arbitrary order + /// for i in set.drain() { + /// println!("{}", i); + /// } + /// + /// assert!(set.is_empty()); + /// ``` #[inline] #[stable(feature = "drain", since = "1.6.0")] pub fn drain(&mut self) -> Drain { From 877ec946543cae6f2c86742e79bcb3182e8960df Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sat, 8 Jul 2017 20:46:43 +0300 Subject: [PATCH 146/213] rustc_apfloat: introduce the base Float API. --- src/Cargo.lock | 8 + src/librustc_apfloat/Cargo.toml | 11 + src/librustc_apfloat/lib.rs | 689 +++++++++++++++++++++++++++++ src/librustc_const_math/Cargo.toml | 1 + 4 files changed, 709 insertions(+) create mode 100644 src/librustc_apfloat/Cargo.toml create mode 100644 src/librustc_apfloat/lib.rs diff --git a/src/Cargo.lock b/src/Cargo.lock index 5f363cb4c487..33531f104809 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1250,6 +1250,13 @@ dependencies = [ "syntax_pos 0.0.0", ] +[[package]] +name = "rustc_apfloat" +version = "0.0.0" +dependencies = [ + "rustc_bitflags 0.0.0", +] + [[package]] name = "rustc_asan" version = "0.0.0" @@ -1307,6 +1314,7 @@ dependencies = [ name = "rustc_const_math" version = "0.0.0" dependencies = [ + "rustc_apfloat 0.0.0", "serialize 0.0.0", "syntax 0.0.0", ] diff --git a/src/librustc_apfloat/Cargo.toml b/src/librustc_apfloat/Cargo.toml new file mode 100644 index 000000000000..b8f8488e3027 --- /dev/null +++ b/src/librustc_apfloat/Cargo.toml @@ -0,0 +1,11 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_apfloat" +version = "0.0.0" + +[lib] +name = "rustc_apfloat" +path = "lib.rs" + +[dependencies] +rustc_bitflags = { path = "../librustc_bitflags" } diff --git a/src/librustc_apfloat/lib.rs b/src/librustc_apfloat/lib.rs new file mode 100644 index 000000000000..184f90c86dec --- 
/dev/null +++ b/src/librustc_apfloat/lib.rs @@ -0,0 +1,689 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Port of LLVM's APFloat software floating-point implementation from the +//! following C++ sources (please update commit hash when backporting): +//! https://github.com/llvm-mirror/llvm/tree/23efab2bbd424ed13495a420ad8641cb2c6c28f9 +//! * `include/llvm/ADT/APFloat.h` -> `Float` and `FloatConvert` traits +//! * `lib/Support/APFloat.cpp` -> `ieee` and `ppc` modules +//! * `unittests/ADT/APFloatTest.cpp` -> `tests` directory +//! +//! The port contains no unsafe code, global state, or side-effects in general, +//! and the only allocations are in the conversion to/from decimal strings. +//! +//! Most of the API and the testcases are intact in some form or another, +//! with some ergonomic changes, such as idiomatic short names, returning +//! new values instead of mutating the receiver, and having separate method +//! variants that take a non-default rounding mode (with the suffix `_r`). +//! Comments have been preserved where possible, only slightly adapted. +//! +//! Instead of keeping a pointer to a configuration struct and inspecting it +//! dynamically on every operation, types (e.g. `ieee::Double`), traits +//! (e.g. `ieee::Semantics`) and associated constants are employed for +//! increased type safety and performance. +//! +//! On-heap bigints are replaced everywhere (except in decimal conversion), +//! with short arrays of `type Limb = u128` elements (instead of `u64`), +//! This allows fitting the largest supported significands in one integer +//! (`ieee::Quad` and `ppc::Fallback` use slightly less than 128 bits). +//! 
All of the functions in the `ieee::sig` module operate on slices. +//! +//! # Note +//! +//! This API is completely unstable and subject to change. + +#![crate_name = "rustc_apfloat"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![deny(warnings)] +#![forbid(unsafe_code)] + +#![feature(const_fn)] +#![feature(i128_type)] + +#[macro_use] +extern crate rustc_bitflags; + +use std::cmp::Ordering; +use std::fmt; +use std::ops::{Neg, Add, Sub, Mul, Div, Rem}; +use std::ops::{AddAssign, SubAssign, MulAssign, DivAssign, RemAssign, BitOrAssign}; +use std::str::FromStr; + +bitflags! { + /// IEEE-754R 7: Default exception handling. + /// + /// UNDERFLOW or OVERFLOW are always returned or-ed with INEXACT. + #[must_use] + #[derive(Debug)] + flags Status: u8 { + const OK = 0x00, + const INVALID_OP = 0x01, + const DIV_BY_ZERO = 0x02, + const OVERFLOW = 0x04, + const UNDERFLOW = 0x08, + const INEXACT = 0x10 + } +} + +impl BitOrAssign for Status { + fn bitor_assign(&mut self, rhs: Self) { + *self = *self | rhs; + } +} + +#[must_use] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct StatusAnd { + pub status: Status, + pub value: T, +} + +impl Status { + pub fn and(self, value: T) -> StatusAnd { + StatusAnd { + status: self, + value, + } + } +} + +impl StatusAnd { + fn map U, U>(self, f: F) -> StatusAnd { + StatusAnd { + status: self.status, + value: f(self.value), + } + } +} + +#[macro_export] +macro_rules! unpack { + ($status:ident|=, $e:expr) => { + match $e { + $crate::StatusAnd { status, value } => { + $status |= status; + value + } + } + }; + ($status:ident=, $e:expr) => { + match $e { + $crate::StatusAnd { status, value } => { + $status = status; + value + } + } + } +} + +/// Category of internally-represented number. 
+#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Category { + Infinity, + NaN, + Normal, + Zero, +} + +/// IEEE-754R 4.3: Rounding-direction attributes. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Round { + NearestTiesToEven, + TowardPositive, + TowardNegative, + TowardZero, + NearestTiesToAway, +} + +impl Neg for Round { + type Output = Round; + fn neg(self) -> Round { + match self { + Round::TowardPositive => Round::TowardNegative, + Round::TowardNegative => Round::TowardPositive, + Round::NearestTiesToEven | Round::TowardZero | Round::NearestTiesToAway => self, + } + } +} + +/// A signed type to represent a floating point number's unbiased exponent. +pub type ExpInt = i16; + +// \c ilogb error results. +pub const IEK_INF: ExpInt = ExpInt::max_value(); +pub const IEK_NAN: ExpInt = ExpInt::min_value(); +pub const IEK_ZERO: ExpInt = ExpInt::min_value() + 1; + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct ParseError(pub &'static str); + +/// A self-contained host- and target-independent arbitrary-precision +/// floating-point software implementation. +/// +/// `apfloat` uses significand bignum integer arithmetic as provided by functions +/// in the `ieee::sig`. +/// +/// Written for clarity rather than speed, in particular with a view to use in +/// the front-end of a cross compiler so that target arithmetic can be correctly +/// performed on the host. Performance should nonetheless be reasonable, +/// particularly for its intended use. It may be useful as a base +/// implementation for a run-time library during development of a faster +/// target-specific one. +/// +/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all +/// implemented operations. Currently implemented operations are add, subtract, +/// multiply, divide, fused-multiply-add, conversion-to-float, +/// conversion-to-integer and conversion-from-integer. New rounding modes +/// (e.g. away from zero) can be added with three or four lines of code. 
+/// +/// Four formats are built-in: IEEE single precision, double precision, +/// quadruple precision, and x87 80-bit extended double (when operating with +/// full extended precision). Adding a new format that obeys IEEE semantics +/// only requires adding two lines of code: a declaration and definition of the +/// format. +/// +/// All operations return the status of that operation as an exception bit-mask, +/// so multiple operations can be done consecutively with their results or-ed +/// together. The returned status can be useful for compiler diagnostics; e.g., +/// inexact, underflow and overflow can be easily diagnosed on constant folding, +/// and compiler optimizers can determine what exceptions would be raised by +/// folding operations and optimize, or perhaps not optimize, accordingly. +/// +/// At present, underflow tininess is detected after rounding; it should be +/// straight forward to add support for the before-rounding case too. +/// +/// The library reads hexadecimal floating point numbers as per C99, and +/// correctly rounds if necessary according to the specified rounding mode. +/// Syntax is required to have been validated by the caller. +/// +/// It also reads decimal floating point numbers and correctly rounds according +/// to the specified rounding mode. +/// +/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit +/// signed exponent, and the significand as an array of integer limbs. After +/// normalization of a number of precision P the exponent is within the range of +/// the format, and if the number is not denormal the P-th bit of the +/// significand is set as an explicit integer bit. For denormals the most +/// significant bit is shifted right so that the exponent is maintained at the +/// format's minimum, so that the smallest denormal has just the least +/// significant bit of the significand set. 
The sign of zeros and infinities +/// is significant; the exponent and significand of such numbers is not stored, +/// but has a known implicit (deterministic) value: 0 for the significands, 0 +/// for zero exponent, all 1 bits for infinity exponent. For NaNs the sign and +/// significand are deterministic, although not really meaningful, and preserved +/// in non-conversion operations. The exponent is implicitly all 1 bits. +/// +/// `apfloat` does not provide any exception handling beyond default exception +/// handling. We represent Signaling NaNs via IEEE-754R 2008 6.2.1 should clause +/// by encoding Signaling NaNs with the first bit of its trailing significand as +/// 0. +/// +/// Future work +/// =========== +/// +/// Some features that may or may not be worth adding: +/// +/// Optional ability to detect underflow tininess before rounding. +/// +/// New formats: x87 in single and double precision mode (IEEE apart from +/// extended exponent range) (hard). +/// +/// New operations: sqrt, nexttoward. +/// +pub trait Float + : Copy + + Default + + FromStr + + PartialOrd + + fmt::Display + + Neg + + AddAssign + + SubAssign + + MulAssign + + DivAssign + + RemAssign + + Add> + + Sub> + + Mul> + + Div> + + Rem> { + /// Total number of bits in the in-memory format. + const BITS: usize; + + /// Number of bits in the significand. This includes the integer bit. + const PRECISION: usize; + + /// The largest E such that 2^E is representable; this matches the + /// definition of IEEE 754. + const MAX_EXP: ExpInt; + + /// The smallest E such that 2^E is a normalized number; this + /// matches the definition of IEEE 754. + const MIN_EXP: ExpInt; + + /// Positive Zero. + const ZERO: Self; + + /// Positive Infinity. + const INFINITY: Self; + + /// NaN (Not a Number). + // FIXME(eddyb) provide a default when qnan becomes const fn. + const NAN: Self; + + /// Factory for QNaN values. + // FIXME(eddyb) should be const fn. 
+ fn qnan(payload: Option) -> Self; + + /// Factory for SNaN values. + // FIXME(eddyb) should be const fn. + fn snan(payload: Option) -> Self; + + /// Largest finite number. + // FIXME(eddyb) should be const (but FloatPair::largest is nontrivial). + fn largest() -> Self; + + /// Smallest (by magnitude) finite number. + /// Might be denormalized, which implies a relative loss of precision. + const SMALLEST: Self; + + /// Smallest (by magnitude) normalized finite number. + // FIXME(eddyb) should be const (but FloatPair::smallest_normalized is nontrivial). + fn smallest_normalized() -> Self; + + // Arithmetic + + fn add_r(self, rhs: Self, round: Round) -> StatusAnd; + fn sub_r(self, rhs: Self, round: Round) -> StatusAnd { + self.add_r(-rhs, round) + } + fn mul_r(self, rhs: Self, round: Round) -> StatusAnd; + fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd; + fn mul_add(self, multiplicand: Self, addend: Self) -> StatusAnd { + self.mul_add_r(multiplicand, addend, Round::NearestTiesToEven) + } + fn div_r(self, rhs: Self, round: Round) -> StatusAnd; + /// IEEE remainder. + // This is not currently correct in all cases. 
+ fn ieee_rem(self, rhs: Self) -> StatusAnd { + let mut v = self; + + let status; + v = unpack!(status=, v / rhs); + if status == Status::DIV_BY_ZERO { + return status.and(self); + } + + assert!(Self::PRECISION < 128); + + let status; + let x = unpack!(status=, v.to_i128_r(128, Round::NearestTiesToEven, &mut false)); + if status == Status::INVALID_OP { + return status.and(self); + } + + let status; + let mut v = unpack!(status=, Self::from_i128(x)); + assert_eq!(status, Status::OK); // should always work + + let status; + v = unpack!(status=, v * rhs); + assert_eq!(status - Status::INEXACT, Status::OK); // should not overflow or underflow + + let status; + v = unpack!(status=, self - v); + assert_eq!(status - Status::INEXACT, Status::OK); // likewise + + if v.is_zero() { + status.and(v.copy_sign(self)) // IEEE754 requires this + } else { + status.and(v) + } + } + /// C fmod, or llvm frem. + fn c_fmod(self, rhs: Self) -> StatusAnd; + fn round_to_integral(self, round: Round) -> StatusAnd; + + /// IEEE-754R 2008 5.3.1: nextUp. + fn next_up(self) -> StatusAnd; + + /// IEEE-754R 2008 5.3.1: nextDown. + /// + /// *NOTE* since nextDown(x) = -nextUp(-x), we only implement nextUp with + /// appropriate sign switching before/after the computation. 
+ fn next_down(self) -> StatusAnd { + (-self).next_up().map(|r| -r) + } + + fn abs(self) -> Self { + if self.is_negative() { -self } else { self } + } + fn copy_sign(self, rhs: Self) -> Self { + if self.is_negative() != rhs.is_negative() { + -self + } else { + self + } + } + + // Conversions + fn from_bits(input: u128) -> Self; + fn from_i128_r(input: i128, round: Round) -> StatusAnd { + if input < 0 { + Self::from_u128_r(-input as u128, -round).map(|r| -r) + } else { + Self::from_u128_r(input as u128, round) + } + } + fn from_i128(input: i128) -> StatusAnd { + Self::from_i128_r(input, Round::NearestTiesToEven) + } + fn from_u128_r(input: u128, round: Round) -> StatusAnd; + fn from_u128(input: u128) -> StatusAnd { + Self::from_u128_r(input, Round::NearestTiesToEven) + } + fn from_str_r(s: &str, round: Round) -> Result, ParseError>; + fn to_bits(self) -> u128; + + /// Convert a floating point number to an integer according to the + /// rounding mode. In case of an invalid operation exception, + /// deterministic values are returned, namely zero for NaNs and the + /// minimal or maximal value respectively for underflow or overflow. + /// If the rounded value is in range but the floating point number is + /// not the exact integer, the C standard doesn't require an inexact + /// exception to be raised. IEEE-854 does require it so we do that. + /// + /// Note that for conversions to integer type the C standard requires + /// round-to-zero to always be used. + /// + /// The *is_exact output tells whether the result is exact, in the sense + /// that converting it back to the original floating point type produces + /// the original value. This is almost equivalent to result==Status::OK, + /// except for negative zeroes. + fn to_i128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd { + let status; + if self.is_negative() { + if self.is_zero() { + // Negative zero can't be represented as an int. 
+ *is_exact = false; + } + let r = unpack!(status=, (-self).to_u128_r(width, -round, is_exact)); + + // Check for values that don't fit in the signed integer. + if r > (1 << (width - 1)) { + // Return the most negative integer for the given width. + *is_exact = false; + Status::INVALID_OP.and(-1 << (width - 1)) + } else { + status.and(r.wrapping_neg() as i128) + } + } else { + // Positive case is simpler, can pretend it's a smaller unsigned + // integer, and `to_u128` will take care of all the edge cases. + self.to_u128_r(width - 1, round, is_exact).map( + |r| r as i128, + ) + } + } + fn to_i128(self, width: usize) -> StatusAnd { + self.to_i128_r(width, Round::TowardZero, &mut true) + } + fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd; + fn to_u128(self, width: usize) -> StatusAnd { + self.to_u128_r(width, Round::TowardZero, &mut true) + } + + fn cmp_abs_normal(self, rhs: Self) -> Ordering; + + /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0). + fn bitwise_eq(self, rhs: Self) -> bool; + + // IEEE-754R 5.7.2 General operations. + + /// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if + /// both are not NaN. If either argument is a NaN, returns the other argument. + fn min(self, other: Self) -> Self { + if self.is_nan() { + other + } else if other.is_nan() { + self + } else if other.partial_cmp(&self) == Some(Ordering::Less) { + other + } else { + self + } + } + + /// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if + /// both are not NaN. If either argument is a NaN, returns the other argument. + fn max(self, other: Self) -> Self { + if self.is_nan() { + other + } else if other.is_nan() { + self + } else if self.partial_cmp(&other) == Some(Ordering::Less) { + other + } else { + self + } + } + + /// IEEE-754R isSignMinus: Returns true if and only if the current value is + /// negative. + /// + /// This applies to zeros and NaNs as well. 
+ fn is_negative(self) -> bool; + + /// IEEE-754R isNormal: Returns true if and only if the current value is normal. + /// + /// This implies that the current value of the float is not zero, subnormal, + /// infinite, or NaN following the definition of normality from IEEE-754R. + fn is_normal(self) -> bool { + !self.is_denormal() && self.is_finite_non_zero() + } + + /// Returns true if and only if the current value is zero, subnormal, or + /// normal. + /// + /// This means that the value is not infinite or NaN. + fn is_finite(self) -> bool { + !self.is_nan() && !self.is_infinite() + } + + /// Returns true if and only if the float is plus or minus zero. + fn is_zero(self) -> bool { + self.category() == Category::Zero + } + + /// IEEE-754R isSubnormal(): Returns true if and only if the float is a + /// denormal. + fn is_denormal(self) -> bool; + + /// IEEE-754R isInfinite(): Returns true if and only if the float is infinity. + fn is_infinite(self) -> bool { + self.category() == Category::Infinity + } + + /// Returns true if and only if the float is a quiet or signaling NaN. + fn is_nan(self) -> bool { + self.category() == Category::NaN + } + + /// Returns true if and only if the float is a signaling NaN. + fn is_signaling(self) -> bool; + + // Simple Queries + + fn category(self) -> Category; + fn is_non_zero(self) -> bool { + !self.is_zero() + } + fn is_finite_non_zero(self) -> bool { + self.is_finite() && !self.is_zero() + } + fn is_pos_zero(self) -> bool { + self.is_zero() && !self.is_negative() + } + fn is_neg_zero(self) -> bool { + self.is_zero() && self.is_negative() + } + + /// Returns true if and only if the number has the smallest possible non-zero + /// magnitude in the current semantics. + fn is_smallest(self) -> bool { + Self::SMALLEST.copy_sign(self).bitwise_eq(self) + } + + /// Returns true if and only if the number has the largest possible finite + /// magnitude in the current semantics. 
+ fn is_largest(self) -> bool { + Self::largest().copy_sign(self).bitwise_eq(self) + } + + /// Returns true if and only if the number is an exact integer. + fn is_integer(self) -> bool { + // This could be made more efficient; I'm going for obviously correct. + if !self.is_finite() { + return false; + } + self.round_to_integral(Round::TowardZero).value.bitwise_eq( + self, + ) + } + + /// If this value has an exact multiplicative inverse, return it. + fn get_exact_inverse(self) -> Option; + + /// Returns the exponent of the internal representation of the Float. + /// + /// Because the radix of Float is 2, this is equivalent to floor(log2(x)). + /// For special Float values, this returns special error codes: + /// + /// NaN -> \c IEK_NAN + /// 0 -> \c IEK_ZERO + /// Inf -> \c IEK_INF + /// + fn ilogb(self) -> ExpInt; + + /// Returns: self * 2^exp for integral exponents. + fn scalbn_r(self, exp: ExpInt, round: Round) -> Self; + fn scalbn(self, exp: ExpInt) -> Self { + self.scalbn_r(exp, Round::NearestTiesToEven) + } + + /// Equivalent of C standard library function. + /// + /// While the C standard says exp is an unspecified value for infinity and nan, + /// this returns INT_MAX for infinities, and INT_MIN for NaNs (see `ilogb`). + fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self; + fn frexp(self, exp: &mut ExpInt) -> Self { + self.frexp_r(exp, Round::NearestTiesToEven) + } +} + +pub trait FloatConvert: Float { + /// Convert a value of one floating point type to another. + /// The return value corresponds to the IEEE754 exceptions. *loses_info + /// records whether the transformation lost information, i.e. whether + /// converting the result back to the original type will produce the + /// original value (this is almost the same as return value==Status::OK, + /// but there are edge cases where this is not so). 
+ fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd; + fn convert(self, loses_info: &mut bool) -> StatusAnd { + self.convert_r(Round::NearestTiesToEven, loses_info) + } +} + +#[allow(unused)] +macro_rules! float_common_impls { + ($ty:ident<$t:tt>) => { + impl<$t> Default for $ty<$t> where Self: Float { + fn default() -> Self { + Self::ZERO + } + } + + impl<$t> ::std::str::FromStr for $ty<$t> where Self: Float { + type Err = ParseError; + fn from_str(s: &str) -> Result { + Self::from_str_r(s, Round::NearestTiesToEven).map(|x| x.value) + } + } + + // Rounding ties to the nearest even, by default. + + impl<$t> ::std::ops::Add for $ty<$t> where Self: Float { + type Output = StatusAnd; + fn add(self, rhs: Self) -> StatusAnd { + self.add_r(rhs, Round::NearestTiesToEven) + } + } + + impl<$t> ::std::ops::Sub for $ty<$t> where Self: Float { + type Output = StatusAnd; + fn sub(self, rhs: Self) -> StatusAnd { + self.sub_r(rhs, Round::NearestTiesToEven) + } + } + + impl<$t> ::std::ops::Mul for $ty<$t> where Self: Float { + type Output = StatusAnd; + fn mul(self, rhs: Self) -> StatusAnd { + self.mul_r(rhs, Round::NearestTiesToEven) + } + } + + impl<$t> ::std::ops::Div for $ty<$t> where Self: Float { + type Output = StatusAnd; + fn div(self, rhs: Self) -> StatusAnd { + self.div_r(rhs, Round::NearestTiesToEven) + } + } + + impl<$t> ::std::ops::Rem for $ty<$t> where Self: Float { + type Output = StatusAnd; + fn rem(self, rhs: Self) -> StatusAnd { + self.c_fmod(rhs) + } + } + + impl<$t> ::std::ops::AddAssign for $ty<$t> where Self: Float { + fn add_assign(&mut self, rhs: Self) { + *self = (*self + rhs).value; + } + } + + impl<$t> ::std::ops::SubAssign for $ty<$t> where Self: Float { + fn sub_assign(&mut self, rhs: Self) { + *self = (*self - rhs).value; + } + } + + impl<$t> ::std::ops::MulAssign for $ty<$t> where Self: Float { + fn mul_assign(&mut self, rhs: Self) { + *self = (*self * rhs).value; + } + } + + impl<$t> ::std::ops::DivAssign for $ty<$t> where Self: 
Float { + fn div_assign(&mut self, rhs: Self) { + *self = (*self / rhs).value; + } + } + + impl<$t> ::std::ops::RemAssign for $ty<$t> where Self: Float { + fn rem_assign(&mut self, rhs: Self) { + *self = (*self % rhs).value; + } + } + } +} diff --git a/src/librustc_const_math/Cargo.toml b/src/librustc_const_math/Cargo.toml index e74c1ef693ce..41310ede3e08 100644 --- a/src/librustc_const_math/Cargo.toml +++ b/src/librustc_const_math/Cargo.toml @@ -9,5 +9,6 @@ path = "lib.rs" crate-type = ["dylib"] [dependencies] +rustc_apfloat = { path = "../librustc_apfloat" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } From 2686a7af791204948502295d383954d5895145f7 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 12 Jul 2017 01:06:02 +0300 Subject: [PATCH 147/213] rustc_apfloat: stub IEEE & PPC implementations. --- src/librustc_apfloat/ieee.rs | 265 +++++++++++++++++++++++++++++++++++ src/librustc_apfloat/lib.rs | 4 +- src/librustc_apfloat/ppc.rs | 187 ++++++++++++++++++++++++ 3 files changed, 455 insertions(+), 1 deletion(-) create mode 100644 src/librustc_apfloat/ieee.rs create mode 100644 src/librustc_apfloat/ppc.rs diff --git a/src/librustc_apfloat/ieee.rs b/src/librustc_apfloat/ieee.rs new file mode 100644 index 000000000000..aab426a5caff --- /dev/null +++ b/src/librustc_apfloat/ieee.rs @@ -0,0 +1,265 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use {Category, ExpInt}; +use {Float, FloatConvert, ParseError, Round, StatusAnd}; + +use std::cmp::Ordering; +use std::fmt; +use std::marker::PhantomData; +use std::ops::Neg; + +#[must_use] +pub struct IeeeFloat { + marker: PhantomData, +} + +/// Represents floating point arithmetic semantics. +pub trait Semantics: Sized { + /// Total number of bits in the in-memory format. + const BITS: usize; + + /// Number of bits in the significand. This includes the integer bit. + const PRECISION: usize; + + /// The largest E such that 2^E is representable; this matches the + /// definition of IEEE 754. + const MAX_EXP: ExpInt; + + /// The smallest E such that 2^E is a normalized number; this + /// matches the definition of IEEE 754. + const MIN_EXP: ExpInt = -Self::MAX_EXP + 1; +} + +impl Copy for IeeeFloat {} +impl Clone for IeeeFloat { + fn clone(&self) -> Self { + *self + } +} + +macro_rules! ieee_semantics { + ($($name:ident = $sem:ident($bits:tt : $exp_bits:tt)),*) => { + $(pub struct $sem;)* + $(pub type $name = IeeeFloat<$sem>;)* + $(impl Semantics for $sem { + const BITS: usize = $bits; + const PRECISION: usize = ($bits - 1 - $exp_bits) + 1; + const MAX_EXP: ExpInt = (1 << ($exp_bits - 1)) - 1; + })* + } +} + +ieee_semantics! 
{ + Half = HalfS(16:5), + Single = SingleS(32:8), + Double = DoubleS(64:11), + Quad = QuadS(128:15) +} + +pub struct X87DoubleExtendedS; +pub type X87DoubleExtended = IeeeFloat; +impl Semantics for X87DoubleExtendedS { + const BITS: usize = 80; + const PRECISION: usize = 64; + const MAX_EXP: ExpInt = (1 << (15 - 1)) - 1; +} + +float_common_impls!(IeeeFloat); + +impl PartialEq for IeeeFloat { + fn eq(&self, rhs: &Self) -> bool { + self.partial_cmp(rhs) == Some(Ordering::Equal) + } +} + +#[allow(unused)] +impl PartialOrd for IeeeFloat { + fn partial_cmp(&self, rhs: &Self) -> Option { + panic!("NYI PartialOrd::partial_cmp"); + } +} + +impl Neg for IeeeFloat { + type Output = Self; + fn neg(self) -> Self { + panic!("NYI Neg::neg"); + } +} + +/// Prints this value as a decimal string. +/// +/// \param precision The maximum number of digits of +/// precision to output. If there are fewer digits available, +/// zero padding will not be used unless the value is +/// integral and small enough to be expressed in +/// precision digits. 0 means to use the natural +/// precision of the number. +/// \param width The maximum number of zeros to +/// consider inserting before falling back to scientific +/// notation. 0 means to always use scientific notation. +/// +/// \param alternate Indicate whether to remove the trailing zero in +/// fraction part or not. Also setting this parameter to true forces +/// producing of output more similar to default printf behavior. +/// Specifically the lower e is used as exponent delimiter and exponent +/// always contains no less than two digits. 
+/// +/// Number precision width Result +/// ------ --------- ----- ------ +/// 1.01E+4 5 2 10100 +/// 1.01E+4 4 2 1.01E+4 +/// 1.01E+4 5 1 1.01E+4 +/// 1.01E-2 5 2 0.0101 +/// 1.01E-2 4 2 0.0101 +/// 1.01E-2 4 1 1.01E-2 +#[allow(unused)] +impl fmt::Display for IeeeFloat { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let frac_digits = f.precision().unwrap_or(0); + let width = f.width().unwrap_or(3); + let alternate = f.alternate(); + panic!("NYI Display::fmt"); + } +} + +impl fmt::Debug for IeeeFloat { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +#[allow(unused)] +impl Float for IeeeFloat { + const BITS: usize = S::BITS; + const PRECISION: usize = S::PRECISION; + const MAX_EXP: ExpInt = S::MAX_EXP; + const MIN_EXP: ExpInt = S::MIN_EXP; + + const ZERO: Self = IeeeFloat { marker: PhantomData }; + + const INFINITY: Self = IeeeFloat { marker: PhantomData }; + + // FIXME(eddyb) remove when qnan becomes const fn. + const NAN: Self = IeeeFloat { marker: PhantomData }; + + fn qnan(payload: Option) -> Self { + panic!("NYI qnan") + } + + fn snan(payload: Option) -> Self { + panic!("NYI snan") + } + + fn largest() -> Self { + panic!("NYI largest") + } + + const SMALLEST: Self = IeeeFloat { marker: PhantomData }; + + fn smallest_normalized() -> Self { + panic!("NYI smallest_normalized") + } + + fn add_r(self, rhs: Self, round: Round) -> StatusAnd { + panic!("NYI add_r") + } + + fn mul_r(self, rhs: Self, round: Round) -> StatusAnd { + panic!("NYI mul_r") + } + + fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd { + panic!("NYI mul_add_r") + } + + fn div_r(self, rhs: Self, round: Round) -> StatusAnd { + panic!("NYI div_r") + } + + fn c_fmod(self, rhs: Self) -> StatusAnd { + panic!("NYI c_fmod") + } + + fn round_to_integral(self, round: Round) -> StatusAnd { + panic!("NYI round_to_integral") + } + + fn next_up(self) -> StatusAnd { + panic!("NYI next_up") + } + + fn from_bits(input: u128) -> 
Self { + panic!("NYI from_bits") + } + + fn from_u128_r(input: u128, round: Round) -> StatusAnd { + panic!("NYI from_u128_r") + } + + fn from_str_r(s: &str, round: Round) -> Result, ParseError> { + panic!("NYI from_str_r") + } + + fn to_bits(self) -> u128 { + panic!("NYI to_bits") + } + + fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd { + panic!("NYI to_u128_r"); + } + + fn cmp_abs_normal(self, rhs: Self) -> Ordering { + panic!("NYI cmp_abs_normal") + } + + fn bitwise_eq(self, rhs: Self) -> bool { + panic!("NYI bitwise_eq") + } + + fn is_negative(self) -> bool { + panic!("NYI is_negative") + } + + fn is_denormal(self) -> bool { + panic!("NYI is_denormal") + } + + fn is_signaling(self) -> bool { + panic!("NYI is_signaling") + } + + fn category(self) -> Category { + panic!("NYI category") + } + + fn get_exact_inverse(self) -> Option { + panic!("NYI get_exact_inverse") + } + + fn ilogb(self) -> ExpInt { + panic!("NYI ilogb") + } + + fn scalbn_r(self, exp: ExpInt, round: Round) -> Self { + panic!("NYI scalbn") + } + + fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self { + panic!("NYI frexp") + } +} + +#[allow(unused)] +impl FloatConvert> for IeeeFloat { + fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd> { + panic!("NYI convert_r"); + } +} diff --git a/src/librustc_apfloat/lib.rs b/src/librustc_apfloat/lib.rs index 184f90c86dec..eb372b52c0a1 100644 --- a/src/librustc_apfloat/lib.rs +++ b/src/librustc_apfloat/lib.rs @@ -603,7 +603,6 @@ pub trait FloatConvert: Float { } } -#[allow(unused)] macro_rules! float_common_impls { ($ty:ident<$t:tt>) => { impl<$t> Default for $ty<$t> where Self: Float { @@ -687,3 +686,6 @@ macro_rules! 
float_common_impls { } } } + +pub mod ieee; +pub mod ppc; diff --git a/src/librustc_apfloat/ppc.rs b/src/librustc_apfloat/ppc.rs new file mode 100644 index 000000000000..03c4830d49e8 --- /dev/null +++ b/src/librustc_apfloat/ppc.rs @@ -0,0 +1,187 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use {ieee, Category, ExpInt, Float, Round, ParseError, StatusAnd}; + +use std::cmp::Ordering; +use std::fmt; +use std::ops::Neg; + +#[must_use] +#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)] +pub struct DoubleFloat(F, F); +pub type DoubleDouble = DoubleFloat; + +// These are legacy semantics for the Fallback, inaccrurate implementation of +// IBM double-double, if the accurate DoubleDouble doesn't handle the +// operation. It's equivalent to having an IEEE number with consecutive 106 +// bits of mantissa and 11 bits of exponent. +// +// It's not equivalent to IBM double-double. For example, a legit IBM +// double-double, 1 + epsilon: +// +// 1 + epsilon = 1 + (1 >> 1076) +// +// is not representable by a consecutive 106 bits of mantissa. +// +// Currently, these semantics are used in the following way: +// +// DoubleDouble -> (Double, Double) -> +// DoubleDouble's Fallback -> IEEE operations +// +// FIXME: Implement all operations in DoubleDouble, and delete these +// semantics. +// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds. +pub struct FallbackS(F); +type Fallback = ieee::IeeeFloat>; +impl ieee::Semantics for FallbackS { + // Forbid any conversion to/from bits. 
+ const BITS: usize = 0; + const PRECISION: usize = F::PRECISION * 2; + const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt; + const MIN_EXP: ExpInt = F::MIN_EXP as ExpInt + F::PRECISION as ExpInt; +} + +float_common_impls!(DoubleFloat); + +impl Neg for DoubleFloat { + type Output = Self; + fn neg(self) -> Self { + panic!("NYI Neg::neg"); + } +} + +#[allow(unused)] +impl fmt::Display for DoubleFloat { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + panic!("NYI Display::fmt"); + } +} + +#[allow(unused)] +impl Float for DoubleFloat { + const BITS: usize = F::BITS * 2; + const PRECISION: usize = Fallback::::PRECISION; + const MAX_EXP: ExpInt = Fallback::::MAX_EXP; + const MIN_EXP: ExpInt = Fallback::::MIN_EXP; + + const ZERO: Self = DoubleFloat(F::ZERO, F::ZERO); + + const INFINITY: Self = DoubleFloat(F::INFINITY, F::ZERO); + + // FIXME(eddyb) remove when qnan becomes const fn. + const NAN: Self = DoubleFloat(F::NAN, F::ZERO); + + fn qnan(payload: Option) -> Self { + panic!("NYI qnan") + } + + fn snan(payload: Option) -> Self { + panic!("NYI snan") + } + + fn largest() -> Self { + panic!("NYI largest") + } + + const SMALLEST: Self = DoubleFloat(F::SMALLEST, F::ZERO); + + fn smallest_normalized() -> Self { + panic!("NYI smallest_normalized") + } + + fn add_r(self, rhs: Self, round: Round) -> StatusAnd { + panic!("NYI add_r") + } + + fn mul_r(self, rhs: Self, round: Round) -> StatusAnd { + panic!("NYI mul_r") + } + + fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd { + panic!("NYI mul_add_r") + } + + fn div_r(self, rhs: Self, round: Round) -> StatusAnd { + panic!("NYI div_r") + } + + fn c_fmod(self, rhs: Self) -> StatusAnd { + panic!("NYI c_fmod") + } + + fn round_to_integral(self, round: Round) -> StatusAnd { + panic!("NYI round_to_integral") + } + + fn next_up(self) -> StatusAnd { + panic!("NYI next_up") + } + + fn from_bits(input: u128) -> Self { + panic!("NYI from_bits") + } + + fn from_u128_r(input: u128, round: Round) -> 
StatusAnd { + panic!("NYI from_u128_r") + } + + fn from_str_r(s: &str, round: Round) -> Result, ParseError> { + panic!("NYI from_str_r") + } + + fn to_bits(self) -> u128 { + panic!("NYI to_bits") + } + + fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd { + panic!("NYI to_u128_r"); + } + + fn cmp_abs_normal(self, rhs: Self) -> Ordering { + panic!("NYI cmp_abs_normal") + } + + fn bitwise_eq(self, rhs: Self) -> bool { + panic!("NYI bitwise_eq") + } + + fn is_negative(self) -> bool { + panic!("NYI is_negative") + } + + fn is_denormal(self) -> bool { + panic!("NYI is_denormal") + } + + fn is_signaling(self) -> bool { + panic!("NYI is_signaling") + } + + fn category(self) -> Category { + panic!("NYI category") + } + + fn get_exact_inverse(self) -> Option { + panic!("NYI get_exact_inverse") + } + + fn ilogb(self) -> ExpInt { + panic!("NYI ilogb") + } + + fn scalbn_r(self, exp: ExpInt, round: Round) -> Self { + panic!("NYI scalbn") + } + + fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self { + panic!("NYI frexp") + } +} From 7a5fccf33159f381468317b38889ddb11deff357 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Wed, 12 Jul 2017 01:06:56 +0300 Subject: [PATCH 148/213] rustc_apfloat: port the suite of unit tests. --- src/librustc_apfloat/tests/ieee.rs | 6891 ++++++++++++++++++++++++++++ src/librustc_apfloat/tests/ppc.rs | 655 +++ 2 files changed, 7546 insertions(+) create mode 100644 src/librustc_apfloat/tests/ieee.rs create mode 100644 src/librustc_apfloat/tests/ppc.rs diff --git a/src/librustc_apfloat/tests/ieee.rs b/src/librustc_apfloat/tests/ieee.rs new file mode 100644 index 000000000000..aff2076e0383 --- /dev/null +++ b/src/librustc_apfloat/tests/ieee.rs @@ -0,0 +1,6891 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(i128_type)] + +#[macro_use] +extern crate rustc_apfloat; + +use rustc_apfloat::{Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO}; +use rustc_apfloat::{Float, FloatConvert, ParseError, Round, Status}; +use rustc_apfloat::ieee::{Half, Single, Double, Quad, X87DoubleExtended}; + +trait SingleExt { + fn from_f32(input: f32) -> Self; + fn to_f32(self) -> f32; +} + +impl SingleExt for Single { + fn from_f32(input: f32) -> Self { + Self::from_bits(input.to_bits() as u128) + } + + fn to_f32(self) -> f32 { + f32::from_bits(self.to_bits() as u32) + } +} + +trait DoubleExt { + fn from_f64(input: f64) -> Self; + fn to_f64(self) -> f64; +} + +impl DoubleExt for Double { + fn from_f64(input: f64) -> Self { + Self::from_bits(input.to_bits() as u128) + } + + fn to_f64(self) -> f64 { + f64::from_bits(self.to_bits() as u64) + } +} + +#[test] +fn is_signaling() { + // We test qNaN, -qNaN, +sNaN, -sNaN with and without payloads. + let payload = 4; + assert!(!Single::qnan(None).is_signaling()); + assert!(!(-Single::qnan(None)).is_signaling()); + assert!(!Single::qnan(Some(payload)).is_signaling()); + assert!(!(-Single::qnan(Some(payload))).is_signaling()); + assert!(Single::snan(None).is_signaling()); + assert!((-Single::snan(None)).is_signaling()); + assert!(Single::snan(Some(payload)).is_signaling()); + assert!((-Single::snan(Some(payload))).is_signaling()); +} + +#[test] +fn next() { + // 1. Test Special Cases Values. + // + // Test all special values for nextUp and nextDown perscribed by IEEE-754R + // 2008. These are: + // 1. +inf + // 2. -inf + // 3. largest + // 4. -largest + // 5. smallest + // 6. -smallest + // 7. qNaN + // 8. sNaN + // 9. +0 + // 10. -0 + + let mut status; + + // nextUp(+inf) = +inf. 
+ let test = unpack!(status=, Quad::INFINITY.next_up()); + let expected = Quad::INFINITY; + assert_eq!(status, Status::OK); + assert!(test.is_infinite()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+inf) = -nextUp(-inf) = -(-largest) = largest + let test = unpack!(status=, Quad::INFINITY.next_down()); + let expected = Quad::largest(); + assert_eq!(status, Status::OK); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-inf) = -largest + let test = unpack!(status=, (-Quad::INFINITY).next_up()); + let expected = -Quad::largest(); + assert_eq!(status, Status::OK); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-inf) = -nextUp(+inf) = -(+inf) = -inf. + let test = unpack!(status=, (-Quad::INFINITY).next_down()); + let expected = -Quad::INFINITY; + assert_eq!(status, Status::OK); + assert!(test.is_infinite() && test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(largest) = +inf + let test = unpack!(status=, Quad::largest().next_up()); + let expected = Quad::INFINITY; + assert_eq!(status, Status::OK); + assert!(test.is_infinite() && !test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(largest) = -nextUp(-largest) + // = -(-largest + inc) + // = largest - inc. + let test = unpack!(status=, Quad::largest().next_down()); + let expected = "0x1.fffffffffffffffffffffffffffep+16383" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_infinite() && !test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-largest) = -largest + inc. + let test = unpack!(status=, (-Quad::largest()).next_up()); + let expected = "-0x1.fffffffffffffffffffffffffffep+16383" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(-largest) = -nextUp(largest) = -(inf) = -inf. 
+ let test = unpack!(status=, (-Quad::largest()).next_down()); + let expected = -Quad::INFINITY; + assert_eq!(status, Status::OK); + assert!(test.is_infinite() && test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(smallest) = smallest + inc. + let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382" + .parse::() + .unwrap() + .next_up()); + let expected = "0x0.0000000000000000000000000002p-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(smallest) = -nextUp(-smallest) = -(-0) = +0. + let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382" + .parse::() + .unwrap() + .next_down()); + let expected = Quad::ZERO; + assert_eq!(status, Status::OK); + assert!(test.is_pos_zero()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-smallest) = -0. + let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382" + .parse::() + .unwrap() + .next_up()); + let expected = -Quad::ZERO; + assert_eq!(status, Status::OK); + assert!(test.is_neg_zero()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-smallest) = -nextUp(smallest) = -smallest - inc. 
+ let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382" + .parse::() + .unwrap() + .next_down()); + let expected = "-0x0.0000000000000000000000000002p-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextUp(qNaN) = qNaN + let test = unpack!(status=, Quad::qnan(None).next_up()); + let expected = Quad::qnan(None); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(qNaN) = qNaN + let test = unpack!(status=, Quad::qnan(None).next_down()); + let expected = Quad::qnan(None); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextUp(sNaN) = qNaN + let test = unpack!(status=, Quad::snan(None).next_up()); + let expected = Quad::qnan(None); + assert_eq!(status, Status::INVALID_OP); + assert!(test.bitwise_eq(expected)); + + // nextDown(sNaN) = qNaN + let test = unpack!(status=, Quad::snan(None).next_down()); + let expected = Quad::qnan(None); + assert_eq!(status, Status::INVALID_OP); + assert!(test.bitwise_eq(expected)); + + // nextUp(+0) = +smallest + let test = unpack!(status=, Quad::ZERO.next_up()); + let expected = Quad::SMALLEST; + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(+0) = -nextUp(-0) = -smallest + let test = unpack!(status=, Quad::ZERO.next_down()); + let expected = -Quad::SMALLEST; + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextUp(-0) = +smallest + let test = unpack!(status=, (-Quad::ZERO).next_up()); + let expected = Quad::SMALLEST; + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(-0) = -nextUp(0) = -smallest + let test = unpack!(status=, (-Quad::ZERO).next_down()); + let expected = -Quad::SMALLEST; + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // 2. Binade Boundary Tests. + + // 2a. Test denormal <-> normal binade boundaries. 
+ // * nextUp(+Largest Denormal) -> +Smallest Normal. + // * nextDown(-Largest Denormal) -> -Smallest Normal. + // * nextUp(-Smallest Normal) -> -Largest Denormal. + // * nextDown(+Smallest Normal) -> +Largest Denormal. + + // nextUp(+Largest Denormal) -> +Smallest Normal. + let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap() + .next_up()); + let expected = "0x1.0000000000000000000000000000p-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Largest Denormal) -> -Smallest Normal. + let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap() + .next_down()); + let expected = "-0x1.0000000000000000000000000000p-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-Smallest Normal) -> -Largest Denormal. + let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382" + .parse::() + .unwrap() + .next_up()); + let expected = "-0x0.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Smallest Normal) -> +Largest Denormal. + let test = unpack!(status=, "+0x1.0000000000000000000000000000p-16382" + .parse::() + .unwrap() + .next_down()); + let expected = "+0x0.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + // 2b. Test normal <-> normal binade boundaries. + // * nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1. + // * nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1. + // * nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary. + // * nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary. 
+ + // nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1. + let test = unpack!(status=, "-0x1p+1".parse::().unwrap().next_up()); + let expected = "-0x1.ffffffffffffffffffffffffffffp+0" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1. + let test = unpack!(status=, "0x1p+1".parse::().unwrap().next_down()); + let expected = "0x1.ffffffffffffffffffffffffffffp+0" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary. + let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp+0" + .parse::() + .unwrap() + .next_up()); + let expected = "0x1p+1".parse::().unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary. + let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp+0" + .parse::() + .unwrap() + .next_down()); + let expected = "-0x1p+1".parse::().unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // 2c. Test using next at binade boundaries with a direction away from the + // binade boundary. Away from denormal <-> normal boundaries. + // + // This is to make sure that even though we are at a binade boundary, since + // we are rounding away, we do not trigger the binade boundary code. Thus we + // test: + // * nextUp(-Largest Denormal) -> -Largest Denormal + inc. + // * nextDown(+Largest Denormal) -> +Largest Denormal - inc. + // * nextUp(+Smallest Normal) -> +Smallest Normal + inc. + // * nextDown(-Smallest Normal) -> -Smallest Normal - inc. + + // nextUp(-Largest Denormal) -> -Largest Denormal + inc. 
+ let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap() + .next_up()); + let expected = "-0x0.fffffffffffffffffffffffffffep-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Largest Denormal) -> +Largest Denormal - inc. + let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap() + .next_down()); + let expected = "0x0.fffffffffffffffffffffffffffep-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(+Smallest Normal) -> +Smallest Normal + inc. + let test = unpack!(status=, "0x1.0000000000000000000000000000p-16382" + .parse::() + .unwrap() + .next_up()); + let expected = "0x1.0000000000000000000000000001p-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Smallest Normal) -> -Smallest Normal - inc. + let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382" + .parse::() + .unwrap() + .next_down()); + let expected = "-0x1.0000000000000000000000000001p-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // 2d. Test values which cause our exponent to go to min exponent. This + // is to ensure that guards in the code to check for min exponent + // trigger properly. 
+ // * nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382 + // * nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) -> + // -0x1p-16381 + // * nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16382 + // * nextDown(0x1p-16382) -> 0x1.ffffffffffffffffffffffffffffp-16382 + + // nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382 + let test = unpack!(status=, "-0x1p-16381".parse::().unwrap().next_up()); + let expected = "-0x1.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) -> + // -0x1p-16381 + let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap() + .next_down()); + let expected = "-0x1p-16381".parse::().unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16381 + let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap() + .next_up()); + let expected = "0x1p-16381".parse::().unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // nextDown(0x1p-16381) -> 0x1.ffffffffffffffffffffffffffffp-16382 + let test = unpack!(status=, "0x1p-16381".parse::().unwrap().next_down()); + let expected = "0x1.ffffffffffffffffffffffffffffp-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.bitwise_eq(expected)); + + // 3. Now we test both denormal/normal computation which will not cause us + // to go across binade boundaries. Specifically we test: + // * nextUp(+Denormal) -> +Denormal. + // * nextDown(+Denormal) -> +Denormal. + // * nextUp(-Denormal) -> -Denormal. + // * nextDown(-Denormal) -> -Denormal. + // * nextUp(+Normal) -> +Normal. + // * nextDown(+Normal) -> +Normal. + // * nextUp(-Normal) -> -Normal. + // * nextDown(-Normal) -> -Normal. + + // nextUp(+Denormal) -> +Denormal. 
+ let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382" + .parse::() + .unwrap() + .next_up()); + let expected = "0x0.ffffffffffffffffffffffff000dp-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Denormal) -> +Denormal. + let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382" + .parse::() + .unwrap() + .next_down()); + let expected = "0x0.ffffffffffffffffffffffff000bp-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-Denormal) -> -Denormal. + let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382" + .parse::() + .unwrap() + .next_up()); + let expected = "-0x0.ffffffffffffffffffffffff000bp-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Denormal) -> -Denormal + let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382" + .parse::() + .unwrap() + .next_down()); + let expected = "-0x0.ffffffffffffffffffffffff000dp-16382" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(+Normal) -> +Normal. + let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000" + .parse::() + .unwrap() + .next_up()); + let expected = "0x1.ffffffffffffffffffffffff000dp-16000" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(+Normal) -> +Normal. 
+ let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000" + .parse::() + .unwrap() + .next_down()); + let expected = "0x1.ffffffffffffffffffffffff000bp-16000" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextUp(-Normal) -> -Normal. + let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000" + .parse::() + .unwrap() + .next_up()); + let expected = "-0x1.ffffffffffffffffffffffff000bp-16000" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + + // nextDown(-Normal) -> -Normal. + let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000" + .parse::() + .unwrap() + .next_down()); + let expected = "-0x1.ffffffffffffffffffffffff000dp-16000" + .parse::() + .unwrap(); + assert_eq!(status, Status::OK); + assert!(!test.is_denormal()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); +} + +#[test] +fn fma() { + { + let mut f1 = Single::from_f32(14.5); + let f2 = Single::from_f32(-14.5); + let f3 = Single::from_f32(225.0); + f1 = f1.mul_add(f2, f3).value; + assert_eq!(14.75, f1.to_f32()); + } + + { + let val2 = Single::from_f32(2.0); + let mut f1 = Single::from_f32(1.17549435e-38); + let mut f2 = Single::from_f32(1.17549435e-38); + f1 /= val2; + f2 /= val2; + let f3 = Single::from_f32(12.0); + f1 = f1.mul_add(f2, f3).value; + assert_eq!(12.0, f1.to_f32()); + } + + // Test for correct zero sign when answer is exactly zero. + // fma(1.0, -1.0, 1.0) -> +ve 0. + { + let mut f1 = Double::from_f64(1.0); + let f2 = Double::from_f64(-1.0); + let f3 = Double::from_f64(1.0); + f1 = f1.mul_add(f2, f3).value; + assert!(!f1.is_negative() && f1.is_zero()); + } + + // Test for correct zero sign when answer is exactly zero and rounding towards + // negative. + // fma(1.0, -1.0, 1.0) -> +ve 0. 
+ { + let mut f1 = Double::from_f64(1.0); + let f2 = Double::from_f64(-1.0); + let f3 = Double::from_f64(1.0); + f1 = f1.mul_add_r(f2, f3, Round::TowardNegative).value; + assert!(f1.is_negative() && f1.is_zero()); + } + + // Test for correct (in this case -ve) sign when adding like signed zeros. + // Test fma(0.0, -0.0, -0.0) -> -ve 0. + { + let mut f1 = Double::from_f64(0.0); + let f2 = Double::from_f64(-0.0); + let f3 = Double::from_f64(-0.0); + f1 = f1.mul_add(f2, f3).value; + assert!(f1.is_negative() && f1.is_zero()); + } + + // Test -ve sign preservation when small negative results underflow. + { + let mut f1 = "-0x1p-1074".parse::().unwrap(); + let f2 = "+0x1p-1074".parse::().unwrap(); + let f3 = Double::from_f64(0.0); + f1 = f1.mul_add(f2, f3).value; + assert!(f1.is_negative() && f1.is_zero()); + } + + // Test x87 extended precision case from http://llvm.org/PR20728. + { + let mut m1 = X87DoubleExtended::from_u128(1).value; + let m2 = X87DoubleExtended::from_u128(1).value; + let a = X87DoubleExtended::from_u128(3).value; + + let mut loses_info = false; + m1 = m1.mul_add(m2, a).value; + let r: Single = m1.convert(&mut loses_info).value; + assert!(!loses_info); + assert_eq!(4.0, r.to_f32()); + } +} + +#[test] +fn min_num() { + let f1 = Double::from_f64(1.0); + let f2 = Double::from_f64(2.0); + let nan = Double::NAN; + + assert_eq!(1.0, f1.min(f2).to_f64()); + assert_eq!(1.0, f2.min(f1).to_f64()); + assert_eq!(1.0, f1.min(nan).to_f64()); + assert_eq!(1.0, nan.min(f1).to_f64()); +} + +#[test] +fn max_num() { + let f1 = Double::from_f64(1.0); + let f2 = Double::from_f64(2.0); + let nan = Double::NAN; + + assert_eq!(2.0, f1.max(f2).to_f64()); + assert_eq!(2.0, f2.max(f1).to_f64()); + assert_eq!(1.0, f1.max(nan).to_f64()); + assert_eq!(1.0, nan.max(f1).to_f64()); +} + +#[test] +fn denormal() { + // Test single precision + { + assert!(!Single::from_f32(0.0).is_denormal()); + + let mut t = "1.17549435082228750797e-38".parse::().unwrap(); + assert!(!t.is_denormal()); 
+ + let val2 = Single::from_f32(2.0e0); + t /= val2; + assert!(t.is_denormal()); + } + + // Test double precision + { + assert!(!Double::from_f64(0.0).is_denormal()); + + let mut t = "2.22507385850720138309e-308".parse::().unwrap(); + assert!(!t.is_denormal()); + + let val2 = Double::from_f64(2.0e0); + t /= val2; + assert!(t.is_denormal()); + } + + // Test Intel double-ext + { + assert!(!X87DoubleExtended::from_u128(0).value.is_denormal()); + + let mut t = "3.36210314311209350626e-4932" + .parse::() + .unwrap(); + assert!(!t.is_denormal()); + + t /= X87DoubleExtended::from_u128(2).value; + assert!(t.is_denormal()); + } + + // Test quadruple precision + { + assert!(!Quad::from_u128(0).value.is_denormal()); + + let mut t = "3.36210314311209350626267781732175260e-4932" + .parse::() + .unwrap(); + assert!(!t.is_denormal()); + + t /= Quad::from_u128(2).value; + assert!(t.is_denormal()); + } +} + +#[test] +fn decimal_strings_without_null_terminators() { + // Make sure that we can parse strings without null terminators. + // rdar://14323230. 
+ let val = "0.00"[..3].parse::().unwrap(); + assert_eq!(val.to_f64(), 0.0); + let val = "0.01"[..3].parse::().unwrap(); + assert_eq!(val.to_f64(), 0.0); + let val = "0.09"[..3].parse::().unwrap(); + assert_eq!(val.to_f64(), 0.0); + let val = "0.095"[..4].parse::().unwrap(); + assert_eq!(val.to_f64(), 0.09); + let val = "0.00e+3"[..7].parse::().unwrap(); + assert_eq!(val.to_f64(), 0.00); + let val = "0e+3"[..4].parse::().unwrap(); + assert_eq!(val.to_f64(), 0.00); + +} + +#[test] +fn from_zero_decimal_string() { + assert_eq!(0.0, "0".parse::().unwrap().to_f64()); + assert_eq!(0.0, "+0".parse::().unwrap().to_f64()); + assert_eq!(-0.0, "-0".parse::().unwrap().to_f64()); + + assert_eq!(0.0, "0.".parse::().unwrap().to_f64()); + assert_eq!(0.0, "+0.".parse::().unwrap().to_f64()); + assert_eq!(-0.0, "-0.".parse::().unwrap().to_f64()); + + assert_eq!(0.0, ".0".parse::().unwrap().to_f64()); + assert_eq!(0.0, "+.0".parse::().unwrap().to_f64()); + assert_eq!(-0.0, "-.0".parse::().unwrap().to_f64()); + + assert_eq!(0.0, "0.0".parse::().unwrap().to_f64()); + assert_eq!(0.0, "+0.0".parse::().unwrap().to_f64()); + assert_eq!(-0.0, "-0.0".parse::().unwrap().to_f64()); + + assert_eq!(0.0, "00000.".parse::().unwrap().to_f64()); + assert_eq!(0.0, "+00000.".parse::().unwrap().to_f64()); + assert_eq!(-0.0, "-00000.".parse::().unwrap().to_f64()); + + assert_eq!(0.0, ".00000".parse::().unwrap().to_f64()); + assert_eq!(0.0, "+.00000".parse::().unwrap().to_f64()); + assert_eq!(-0.0, "-.00000".parse::().unwrap().to_f64()); + + assert_eq!(0.0, "0000.00000".parse::().unwrap().to_f64()); + assert_eq!(0.0, "+0000.00000".parse::().unwrap().to_f64()); + assert_eq!(-0.0, "-0000.00000".parse::().unwrap().to_f64()); +} + +#[test] +fn from_zero_decimal_single_exponent_string() { + assert_eq!(0.0, "0e1".parse::().unwrap().to_f64()); + assert_eq!(0.0, "+0e1".parse::().unwrap().to_f64()); + assert_eq!(-0.0, "-0e1".parse::().unwrap().to_f64()); + + assert_eq!(0.0, "0e+1".parse::().unwrap().to_f64()); + 
assert_eq!(0.0, "+0e+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0e+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0e-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0e-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0e-1".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(0.0, "0.e1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0.e1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0.e1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0.e+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0.e+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0.e+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0.e-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0.e-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0.e-1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, ".0e1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+.0e1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-.0e1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, ".0e+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+.0e+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-.0e+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, ".0e-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+.0e-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-.0e-1".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(0.0, "0.0e1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0.0e1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0.0e1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0.0e+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0.0e+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0.0e+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0.0e-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0.0e-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0.0e-1".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(0.0, "000.0000e1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+000.0000e+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0,
"-000.0000e+1".parse::<Double>().unwrap().to_f64());
+}
+
+#[test]
+fn from_zero_decimal_large_exponent_string() {
+    assert_eq!(0.0, "0e1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0e1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0e1234".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0e+1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0e+1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0e+1234".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0e-1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0e-1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0e-1234".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "000.0000e1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "000.0000e-1234".parse::<Double>().unwrap().to_f64());
+}
+
+#[test]
+fn from_zero_hexadecimal_string() {
+    assert_eq!(0.0, "0x0p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x0p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0p1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0x0p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x0p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0p+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0x0p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x0p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0p-1".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(0.0, "0x0.p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x0.p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0.p1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0x0.p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x0.p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0.p+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0x0.p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x0.p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0.p-1".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(0.0, "0x.0p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x.0p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x.0p1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0x.0p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x.0p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x.0p+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0x.0p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x.0p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x.0p-1".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(0.0, "0x0.0p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x0.0p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0.0p1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0x0.0p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x0.0p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0.0p+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.0, "0x0.0p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "+0x0.0p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0.0p-1".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(0.0, "0x00000.p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "0x0000.00000p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "0x.00000p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "0x0.p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "0x0p1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.0, "-0x0p1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "0x00000.p1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "0x0000.00000p1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "0x.00000p1234".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.0, "0x0.p1234".parse::<Double>().unwrap().to_f64());
+}
+
+#[test]
+fn from_decimal_string() {
+    assert_eq!(1.0, "1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.0, "2.".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.5, ".5".parse::<Double>().unwrap().to_f64());
+    assert_eq!(1.0, "1.0".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-2.0, "-2".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-4.0, "-4.".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.5,
"-.5".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-1.5, "-1.5".parse::<Double>().unwrap().to_f64());
+    assert_eq!(1.25e12, "1.25e12".parse::<Double>().unwrap().to_f64());
+    assert_eq!(1.25e+12, "1.25e+12".parse::<Double>().unwrap().to_f64());
+    assert_eq!(1.25e-12, "1.25e-12".parse::<Double>().unwrap().to_f64());
+    assert_eq!(1024.0, "1024.".parse::<Double>().unwrap().to_f64());
+    assert_eq!(1024.05, "1024.05000".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.05, ".05000".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.0, "2.".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.0e2, "2.e2".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.0e+2, "2.e+2".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.0e-2, "2.e-2".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.05e2, "002.05000e2".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.05e+2, "002.05000e+2".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.05e-2, "002.05000e-2".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.05e12, "002.05000e12".parse::<Double>().unwrap().to_f64());
+    assert_eq!(
+        2.05e+12,
+        "002.05000e+12".parse::<Double>().unwrap().to_f64()
+    );
+    assert_eq!(
+        2.05e-12,
+        "002.05000e-12".parse::<Double>().unwrap().to_f64()
+    );
+
+    // These are "carefully selected" to overflow the fast log-base
+    // calculations in the implementation.
+    assert!("99e99999".parse::<Double>().unwrap().is_infinite());
+    assert!("-99e99999".parse::<Double>().unwrap().is_infinite());
+    assert!("1e-99999".parse::<Double>().unwrap().is_pos_zero());
+    assert!("-1e-99999".parse::<Double>().unwrap().is_neg_zero());
+
+    assert_eq!(2.71828, "2.71828".parse::<Double>().unwrap().to_f64());
+}
+
+#[test]
+fn from_hexadecimal_string() {
+    assert_eq!(1.0, "0x1p0".parse::<Double>().unwrap().to_f64());
+    assert_eq!(1.0, "+0x1p0".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-1.0, "-0x1p0".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(1.0, "0x1p+0".parse::<Double>().unwrap().to_f64());
+    assert_eq!(1.0, "+0x1p+0".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-1.0, "-0x1p+0".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(1.0, "0x1p-0".parse::<Double>().unwrap().to_f64());
+    assert_eq!(1.0, "+0x1p-0".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-1.0, "-0x1p-0".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(2.0, "0x1p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.0, "+0x1p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-2.0, "-0x1p1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(2.0, "0x1p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(2.0, "+0x1p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-2.0, "-0x1p+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.5, "0x1p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.5, "+0x1p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.5, "-0x1p-1".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(3.0, "0x1.8p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(3.0, "+0x1.8p1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-3.0, "-0x1.8p1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(3.0, "0x1.8p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(3.0, "+0x1.8p+1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-3.0, "-0x1.8p+1".parse::<Double>().unwrap().to_f64());
+
+    assert_eq!(0.75, "0x1.8p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(0.75, "+0x1.8p-1".parse::<Double>().unwrap().to_f64());
+    assert_eq!(-0.75, "-0x1.8p-1".parse::<Double>().unwrap().to_f64());
+
+
+    assert_eq!(8192.0,
"0x1000.000p1".parse::().unwrap().to_f64()); + assert_eq!(8192.0, "+0x1000.000p1".parse::().unwrap().to_f64()); + assert_eq!(-8192.0, "-0x1000.000p1".parse::().unwrap().to_f64()); + + assert_eq!(8192.0, "0x1000.000p+1".parse::().unwrap().to_f64()); + assert_eq!(8192.0, "+0x1000.000p+1".parse::().unwrap().to_f64()); + assert_eq!( + -8192.0, + "-0x1000.000p+1".parse::().unwrap().to_f64() + ); + + assert_eq!(2048.0, "0x1000.000p-1".parse::().unwrap().to_f64()); + assert_eq!(2048.0, "+0x1000.000p-1".parse::().unwrap().to_f64()); + assert_eq!( + -2048.0, + "-0x1000.000p-1".parse::().unwrap().to_f64() + ); + + + assert_eq!(8192.0, "0x1000p1".parse::().unwrap().to_f64()); + assert_eq!(8192.0, "+0x1000p1".parse::().unwrap().to_f64()); + assert_eq!(-8192.0, "-0x1000p1".parse::().unwrap().to_f64()); + + assert_eq!(8192.0, "0x1000p+1".parse::().unwrap().to_f64()); + assert_eq!(8192.0, "+0x1000p+1".parse::().unwrap().to_f64()); + assert_eq!(-8192.0, "-0x1000p+1".parse::().unwrap().to_f64()); + + assert_eq!(2048.0, "0x1000p-1".parse::().unwrap().to_f64()); + assert_eq!(2048.0, "+0x1000p-1".parse::().unwrap().to_f64()); + assert_eq!(-2048.0, "-0x1000p-1".parse::().unwrap().to_f64()); + + + assert_eq!(16384.0, "0x10p10".parse::().unwrap().to_f64()); + assert_eq!(16384.0, "+0x10p10".parse::().unwrap().to_f64()); + assert_eq!(-16384.0, "-0x10p10".parse::().unwrap().to_f64()); + + assert_eq!(16384.0, "0x10p+10".parse::().unwrap().to_f64()); + assert_eq!(16384.0, "+0x10p+10".parse::().unwrap().to_f64()); + assert_eq!(-16384.0, "-0x10p+10".parse::().unwrap().to_f64()); + + assert_eq!(0.015625, "0x10p-10".parse::().unwrap().to_f64()); + assert_eq!(0.015625, "+0x10p-10".parse::().unwrap().to_f64()); + assert_eq!(-0.015625, "-0x10p-10".parse::().unwrap().to_f64()); + + assert_eq!(1.0625, "0x1.1p0".parse::().unwrap().to_f64()); + assert_eq!(1.0, "0x1p0".parse::().unwrap().to_f64()); + + assert_eq!( + "0x1p-150".parse::().unwrap().to_f64(), + "+0x800000000000000001.p-221" + .parse::() + 
.unwrap() + .to_f64() + ); + assert_eq!( + 2251799813685248.5, + "0x80000000000004000000.010p-28" + .parse::() + .unwrap() + .to_f64() + ); +} + +#[test] +fn to_string() { + let to_string = |d: f64, precision: usize, width: usize| { + let x = Double::from_f64(d); + if precision == 0 { + format!("{:1$}", x, width) + } else { + format!("{:2$.1$}", x, precision, width) + } + }; + assert_eq!("10", to_string(10.0, 6, 3)); + assert_eq!("1.0E+1", to_string(10.0, 6, 0)); + assert_eq!("10100", to_string(1.01E+4, 5, 2)); + assert_eq!("1.01E+4", to_string(1.01E+4, 4, 2)); + assert_eq!("1.01E+4", to_string(1.01E+4, 5, 1)); + assert_eq!("0.0101", to_string(1.01E-2, 5, 2)); + assert_eq!("0.0101", to_string(1.01E-2, 4, 2)); + assert_eq!("1.01E-2", to_string(1.01E-2, 5, 1)); + assert_eq!( + "0.78539816339744828", + to_string(0.78539816339744830961, 0, 3) + ); + assert_eq!( + "4.9406564584124654E-324", + to_string(4.9406564584124654e-324, 0, 3) + ); + assert_eq!("873.18340000000001", to_string(873.1834, 0, 1)); + assert_eq!("8.7318340000000001E+2", to_string(873.1834, 0, 0)); + assert_eq!( + "1.7976931348623157E+308", + to_string(1.7976931348623157E+308, 0, 0) + ); + + let to_string = |d: f64, precision: usize, width: usize| { + let x = Double::from_f64(d); + if precision == 0 { + format!("{:#1$}", x, width) + } else { + format!("{:#2$.1$}", x, precision, width) + } + }; + assert_eq!("10", to_string(10.0, 6, 3)); + assert_eq!("1.000000e+01", to_string(10.0, 6, 0)); + assert_eq!("10100", to_string(1.01E+4, 5, 2)); + assert_eq!("1.0100e+04", to_string(1.01E+4, 4, 2)); + assert_eq!("1.01000e+04", to_string(1.01E+4, 5, 1)); + assert_eq!("0.0101", to_string(1.01E-2, 5, 2)); + assert_eq!("0.0101", to_string(1.01E-2, 4, 2)); + assert_eq!("1.01000e-02", to_string(1.01E-2, 5, 1)); + assert_eq!( + "0.78539816339744828", + to_string(0.78539816339744830961, 0, 3) + ); + assert_eq!( + "4.94065645841246540e-324", + to_string(4.9406564584124654e-324, 0, 3) + ); + assert_eq!("873.18340000000001", 
to_string(873.1834, 0, 1)); + assert_eq!("8.73183400000000010e+02", to_string(873.1834, 0, 0)); + assert_eq!( + "1.79769313486231570e+308", + to_string(1.7976931348623157E+308, 0, 0) + ); +} + +#[test] +fn to_integer() { + let mut is_exact = false; + + assert_eq!( + Status::OK.and(10), + "10".parse::().unwrap().to_u128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(is_exact); + + assert_eq!( + Status::INVALID_OP.and(0), + "-10".parse::().unwrap().to_u128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); + + assert_eq!( + Status::INVALID_OP.and(31), + "32".parse::().unwrap().to_u128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); + + assert_eq!( + Status::INEXACT.and(7), + "7.9".parse::().unwrap().to_u128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); + + assert_eq!( + Status::OK.and(-10), + "-10".parse::().unwrap().to_i128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(is_exact); + + assert_eq!( + Status::INVALID_OP.and(-16), + "-17".parse::().unwrap().to_i128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); + + assert_eq!( + Status::INVALID_OP.and(15), + "16".parse::().unwrap().to_i128_r( + 5, + Round::TowardZero, + &mut is_exact, + ) + ); + assert!(!is_exact); +} + +#[test] +fn nan() { + fn nanbits(signaling: bool, negative: bool, fill: u128) -> u128 { + let x = if signaling { + T::snan(Some(fill)) + } else { + T::qnan(Some(fill)) + }; + if negative { + (-x).to_bits() + } else { + x.to_bits() + } + } + + assert_eq!(0x7fc00000, nanbits::(false, false, 0)); + assert_eq!(0xffc00000, nanbits::(false, true, 0)); + assert_eq!(0x7fc0ae72, nanbits::(false, false, 0xae72)); + assert_eq!(0x7fffae72, nanbits::(false, false, 0xffffae72)); + assert_eq!(0x7fa00000, nanbits::(true, false, 0)); + assert_eq!(0xffa00000, nanbits::(true, true, 0)); + assert_eq!(0x7f80ae72, nanbits::(true, false, 0xae72)); + assert_eq!(0x7fbfae72, 
nanbits::(true, false, 0xffffae72)); + + assert_eq!(0x7ff8000000000000, nanbits::(false, false, 0)); + assert_eq!(0xfff8000000000000, nanbits::(false, true, 0)); + assert_eq!(0x7ff800000000ae72, nanbits::(false, false, 0xae72)); + assert_eq!( + 0x7fffffffffffae72, + nanbits::(false, false, 0xffffffffffffae72) + ); + assert_eq!(0x7ff4000000000000, nanbits::(true, false, 0)); + assert_eq!(0xfff4000000000000, nanbits::(true, true, 0)); + assert_eq!(0x7ff000000000ae72, nanbits::(true, false, 0xae72)); + assert_eq!( + 0x7ff7ffffffffae72, + nanbits::(true, false, 0xffffffffffffae72) + ); +} + +#[test] +fn string_decimal_death() { + assert_eq!( + "".parse::(), + Err(ParseError("Invalid string length")) + ); + assert_eq!( + "+".parse::(), + Err(ParseError("String has no digits")) + ); + assert_eq!( + "-".parse::(), + Err(ParseError("String has no digits")) + ); + + assert_eq!( + "\0".parse::(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "1\0".parse::(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "1\02".parse::(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "1\02e1".parse::(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "1e\0".parse::(), + Err(ParseError("Invalid character in exponent")) + ); + assert_eq!( + "1e1\0".parse::(), + Err(ParseError("Invalid character in exponent")) + ); + assert_eq!( + "1e1\02".parse::(), + Err(ParseError("Invalid character in exponent")) + ); + + assert_eq!( + "1.0f".parse::(), + Err(ParseError("Invalid character in significand")) + ); + + assert_eq!( + "..".parse::(), + Err(ParseError("String contains multiple dots")) + ); + assert_eq!( + "..0".parse::(), + Err(ParseError("String contains multiple dots")) + ); + assert_eq!( + "1.0.0".parse::(), + Err(ParseError("String contains multiple dots")) + ); +} + +#[test] +fn string_decimal_significand_death() { + assert_eq!( + ".".parse::(), + Err(ParseError("Significand has no 
digits")) + ); + assert_eq!( + "+.".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-.".parse::(), + Err(ParseError("Significand has no digits")) + ); + + + assert_eq!( + "e".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+e".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-e".parse::(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "e1".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+e1".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-e1".parse::(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + ".e1".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+.e1".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-.e1".parse::(), + Err(ParseError("Significand has no digits")) + ); + + + assert_eq!( + ".e".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+.e".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-.e".parse::(), + Err(ParseError("Significand has no digits")) + ); +} + +#[test] +fn string_decimal_exponent_death() { + assert_eq!( + "1e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+1e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-1e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "1.e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+1.e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-1.e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + ".1e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+.1e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-.1e".parse::(), + Err(ParseError("Exponent has no 
digits")) + ); + + assert_eq!( + "1.1e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+1.1e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-1.1e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + + assert_eq!( + "1e+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "1e-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + ".1e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + ".1e+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + ".1e-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "1.0e".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "1.0e+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "1.0e-".parse::(), + Err(ParseError("Exponent has no digits")) + ); +} + +#[test] +fn string_hexadecimal_death() { + assert_eq!("0x".parse::(), Err(ParseError("Invalid string"))); + assert_eq!("+0x".parse::(), Err(ParseError("Invalid string"))); + assert_eq!("-0x".parse::(), Err(ParseError("Invalid string"))); + + assert_eq!( + "0x0".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "+0x0".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "-0x0".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + + assert_eq!( + "0x0.".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "+0x0.".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "-0x0.".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + + assert_eq!( + "0x.0".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "+0x.0".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "-0x.0".parse::(), + Err(ParseError("Hex strings require 
an exponent")) + ); + + assert_eq!( + "0x0.0".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "+0x0.0".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + assert_eq!( + "-0x0.0".parse::(), + Err(ParseError("Hex strings require an exponent")) + ); + + assert_eq!( + "0x\0".parse::(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "0x1\0".parse::(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "0x1\02".parse::(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "0x1\02p1".parse::(), + Err(ParseError("Invalid character in significand")) + ); + assert_eq!( + "0x1p\0".parse::(), + Err(ParseError("Invalid character in exponent")) + ); + assert_eq!( + "0x1p1\0".parse::(), + Err(ParseError("Invalid character in exponent")) + ); + assert_eq!( + "0x1p1\02".parse::(), + Err(ParseError("Invalid character in exponent")) + ); + + assert_eq!( + "0x1p0f".parse::(), + Err(ParseError("Invalid character in exponent")) + ); + + assert_eq!( + "0x..p1".parse::(), + Err(ParseError("String contains multiple dots")) + ); + assert_eq!( + "0x..0p1".parse::(), + Err(ParseError("String contains multiple dots")) + ); + assert_eq!( + "0x1.0.0p1".parse::(), + Err(ParseError("String contains multiple dots")) + ); +} + +#[test] +fn string_hexadecimal_significand_death() { + assert_eq!( + "0x.".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0x.".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0x.".parse::(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "0xp".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0xp".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0xp".parse::(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "0xp+".parse::(), + Err(ParseError("Significand has no digits")) + 
); + assert_eq!( + "+0xp+".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0xp+".parse::(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "0xp-".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0xp-".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0xp-".parse::(), + Err(ParseError("Significand has no digits")) + ); + + + assert_eq!( + "0x.p".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0x.p".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0x.p".parse::(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "0x.p+".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0x.p+".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0x.p+".parse::(), + Err(ParseError("Significand has no digits")) + ); + + assert_eq!( + "0x.p-".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "+0x.p-".parse::(), + Err(ParseError("Significand has no digits")) + ); + assert_eq!( + "-0x.p-".parse::(), + Err(ParseError("Significand has no digits")) + ); +} + +#[test] +fn string_hexadecimal_exponent_death() { + assert_eq!( + "0x1p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + 
"-0x1p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + + assert_eq!( + "0x1.p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1.p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1.p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + + assert_eq!( + "0x.1p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x.1p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x.1p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x.1p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x.1p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x.1p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x.1p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x.1p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x.1p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + + assert_eq!( + "0x1.1p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.1p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.1p".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1.1p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + 
"+0x1.1p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.1p+".parse::(), + Err(ParseError("Exponent has no digits")) + ); + + assert_eq!( + "0x1.1p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "+0x1.1p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); + assert_eq!( + "-0x1.1p-".parse::(), + Err(ParseError("Exponent has no digits")) + ); +} + +#[test] +fn exact_inverse() { + // Trivial operation. + assert!( + Double::from_f64(2.0) + .get_exact_inverse() + .unwrap() + .bitwise_eq(Double::from_f64(0.5)) + ); + assert!( + Single::from_f32(2.0) + .get_exact_inverse() + .unwrap() + .bitwise_eq(Single::from_f32(0.5)) + ); + assert!( + "2.0" + .parse::() + .unwrap() + .get_exact_inverse() + .unwrap() + .bitwise_eq("0.5".parse::().unwrap()) + ); + assert!( + "2.0" + .parse::() + .unwrap() + .get_exact_inverse() + .unwrap() + .bitwise_eq("0.5".parse::().unwrap()) + ); + + // FLT_MIN + assert!( + Single::from_f32(1.17549435e-38) + .get_exact_inverse() + .unwrap() + .bitwise_eq(Single::from_f32(8.5070592e+37)) + ); + + // Large float, inverse is a denormal. 
+ assert!(Single::from_f32(1.7014118e38).get_exact_inverse().is_none()); + // Zero + assert!(Double::from_f64(0.0).get_exact_inverse().is_none()); + // Denormalized float + assert!( + Single::from_f32(1.40129846e-45) + .get_exact_inverse() + .is_none() + ); +} + +#[test] +fn round_to_integral() { + let t = Double::from_f64(-0.5); + assert_eq!(-0.0, t.round_to_integral(Round::TowardZero).value.to_f64()); + assert_eq!( + -1.0, + t.round_to_integral(Round::TowardNegative).value.to_f64() + ); + assert_eq!( + -0.0, + t.round_to_integral(Round::TowardPositive).value.to_f64() + ); + assert_eq!( + -0.0, + t.round_to_integral(Round::NearestTiesToEven).value.to_f64() + ); + + let s = Double::from_f64(3.14); + assert_eq!(3.0, s.round_to_integral(Round::TowardZero).value.to_f64()); + assert_eq!( + 3.0, + s.round_to_integral(Round::TowardNegative).value.to_f64() + ); + assert_eq!( + 4.0, + s.round_to_integral(Round::TowardPositive).value.to_f64() + ); + assert_eq!( + 3.0, + s.round_to_integral(Round::NearestTiesToEven).value.to_f64() + ); + + let r = Double::largest(); + assert_eq!( + r.to_f64(), + r.round_to_integral(Round::TowardZero).value.to_f64() + ); + assert_eq!( + r.to_f64(), + r.round_to_integral(Round::TowardNegative).value.to_f64() + ); + assert_eq!( + r.to_f64(), + r.round_to_integral(Round::TowardPositive).value.to_f64() + ); + assert_eq!( + r.to_f64(), + r.round_to_integral(Round::NearestTiesToEven).value.to_f64() + ); + + let p = Double::ZERO.round_to_integral(Round::TowardZero).value; + assert_eq!(0.0, p.to_f64()); + let p = (-Double::ZERO).round_to_integral(Round::TowardZero).value; + assert_eq!(-0.0, p.to_f64()); + let p = Double::NAN.round_to_integral(Round::TowardZero).value; + assert!(p.to_f64().is_nan()); + let p = Double::INFINITY.round_to_integral(Round::TowardZero).value; + assert!(p.to_f64().is_infinite() && p.to_f64() > 0.0); + let p = (-Double::INFINITY) + .round_to_integral(Round::TowardZero) + .value; + assert!(p.to_f64().is_infinite() && 
p.to_f64() < 0.0); +} + +#[test] +fn is_integer() { + let t = Double::from_f64(-0.0); + assert!(t.is_integer()); + let t = Double::from_f64(3.14159); + assert!(!t.is_integer()); + let t = Double::NAN; + assert!(!t.is_integer()); + let t = Double::INFINITY; + assert!(!t.is_integer()); + let t = -Double::INFINITY; + assert!(!t.is_integer()); + let t = Double::largest(); + assert!(t.is_integer()); +} + +#[test] +fn largest() { + assert_eq!(3.402823466e+38, Single::largest().to_f32()); + assert_eq!(1.7976931348623158e+308, Double::largest().to_f64()); +} + +#[test] +fn smallest() { + let test = Single::SMALLEST; + let expected = "0x0.000002p-126".parse::().unwrap(); + assert!(!test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = -Single::SMALLEST; + let expected = "-0x0.000002p-126".parse::().unwrap(); + assert!(test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = Quad::SMALLEST; + let expected = "0x0.0000000000000000000000000001p-16382" + .parse::() + .unwrap(); + assert!(!test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = -Quad::SMALLEST; + let expected = "-0x0.0000000000000000000000000001p-16382" + .parse::() + .unwrap(); + assert!(test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(test.is_denormal()); + assert!(test.bitwise_eq(expected)); +} + +#[test] +fn smallest_normalized() { + let test = Single::smallest_normalized(); + let expected = "0x1p-126".parse::().unwrap(); + assert!(!test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = -Single::smallest_normalized(); + let expected = "-0x1p-126".parse::().unwrap(); + assert!(test.is_negative()); + assert!(test.is_finite_non_zero()); + 
assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = Quad::smallest_normalized(); + let expected = "0x1p-16382".parse::().unwrap(); + assert!(!test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); + + let test = -Quad::smallest_normalized(); + let expected = "-0x1p-16382".parse::().unwrap(); + assert!(test.is_negative()); + assert!(test.is_finite_non_zero()); + assert!(!test.is_denormal()); + assert!(test.bitwise_eq(expected)); +} + +#[test] +fn zero() { + assert_eq!(0.0, Single::from_f32(0.0).to_f32()); + assert_eq!(-0.0, Single::from_f32(-0.0).to_f32()); + assert!(Single::from_f32(-0.0).is_negative()); + + assert_eq!(0.0, Double::from_f64(0.0).to_f64()); + assert_eq!(-0.0, Double::from_f64(-0.0).to_f64()); + assert!(Double::from_f64(-0.0).is_negative()); + + fn test(sign: bool, bits: u128) { + let test = if sign { -T::ZERO } else { T::ZERO }; + let pattern = if sign { "-0x0p+0" } else { "0x0p+0" }; + let expected = pattern.parse::().unwrap(); + assert!(test.is_zero()); + assert_eq!(sign, test.is_negative()); + assert!(test.bitwise_eq(expected)); + assert_eq!(bits, test.to_bits()); + } + test::(false, 0); + test::(true, 0x8000); + test::(false, 0); + test::(true, 0x80000000); + test::(false, 0); + test::(true, 0x8000000000000000); + test::(false, 0); + test::(true, 0x8000000000000000_0000000000000000); + test::(false, 0); + test::(true, 0x8000_0000000000000000); +} + +#[test] +fn copy_sign() { + assert!(Double::from_f64(-42.0).bitwise_eq( + Double::from_f64(42.0).copy_sign( + Double::from_f64(-1.0), + ), + )); + assert!(Double::from_f64(42.0).bitwise_eq( + Double::from_f64(-42.0).copy_sign( + Double::from_f64(1.0), + ), + )); + assert!(Double::from_f64(-42.0).bitwise_eq( + Double::from_f64(-42.0).copy_sign( + Double::from_f64(-1.0), + ), + )); + assert!(Double::from_f64(42.0).bitwise_eq( + Double::from_f64(42.0).copy_sign( + Double::from_f64(1.0), + ), + )); +} 
+ +#[test] +fn convert() { + let mut loses_info = false; + let test = "1.0".parse::().unwrap(); + let test: Single = test.convert(&mut loses_info).value; + assert_eq!(1.0, test.to_f32()); + assert!(!loses_info); + + let mut test = "0x1p-53".parse::().unwrap(); + let one = "1.0".parse::().unwrap(); + test += one; + let test: Double = test.convert(&mut loses_info).value; + assert_eq!(1.0, test.to_f64()); + assert!(loses_info); + + let mut test = "0x1p-53".parse::().unwrap(); + let one = "1.0".parse::().unwrap(); + test += one; + let test: Double = test.convert(&mut loses_info).value; + assert_eq!(1.0, test.to_f64()); + assert!(loses_info); + + let test = "0xf.fffffffp+28".parse::().unwrap(); + let test: Double = test.convert(&mut loses_info).value; + assert_eq!(4294967295.0, test.to_f64()); + assert!(!loses_info); + + let test = Single::snan(None); + let x87_snan = X87DoubleExtended::snan(None); + let test: X87DoubleExtended = test.convert(&mut loses_info).value; + assert!(test.bitwise_eq(x87_snan)); + assert!(!loses_info); + + let test = Single::qnan(None); + let x87_qnan = X87DoubleExtended::qnan(None); + let test: X87DoubleExtended = test.convert(&mut loses_info).value; + assert!(test.bitwise_eq(x87_qnan)); + assert!(!loses_info); + + let test = X87DoubleExtended::snan(None); + let test: X87DoubleExtended = test.convert(&mut loses_info).value; + assert!(test.bitwise_eq(x87_snan)); + assert!(!loses_info); + + let test = X87DoubleExtended::qnan(None); + let test: X87DoubleExtended = test.convert(&mut loses_info).value; + assert!(test.bitwise_eq(x87_qnan)); + assert!(!loses_info); +} + +#[test] +fn is_negative() { + let t = "0x1p+0".parse::().unwrap(); + assert!(!t.is_negative()); + let t = "-0x1p+0".parse::().unwrap(); + assert!(t.is_negative()); + + assert!(!Single::INFINITY.is_negative()); + assert!((-Single::INFINITY).is_negative()); + + assert!(!Single::ZERO.is_negative()); + assert!((-Single::ZERO).is_negative()); + + assert!(!Single::NAN.is_negative()); + 
assert!((-Single::NAN).is_negative()); + + assert!(!Single::snan(None).is_negative()); + assert!((-Single::snan(None)).is_negative()); +} + +#[test] +fn is_normal() { + let t = "0x1p+0".parse::().unwrap(); + assert!(t.is_normal()); + + assert!(!Single::INFINITY.is_normal()); + assert!(!Single::ZERO.is_normal()); + assert!(!Single::NAN.is_normal()); + assert!(!Single::snan(None).is_normal()); + assert!(!"0x1p-149".parse::().unwrap().is_normal()); +} + +#[test] +fn is_finite() { + let t = "0x1p+0".parse::().unwrap(); + assert!(t.is_finite()); + assert!(!Single::INFINITY.is_finite()); + assert!(Single::ZERO.is_finite()); + assert!(!Single::NAN.is_finite()); + assert!(!Single::snan(None).is_finite()); + assert!("0x1p-149".parse::().unwrap().is_finite()); +} + +#[test] +fn is_infinite() { + let t = "0x1p+0".parse::().unwrap(); + assert!(!t.is_infinite()); + assert!(Single::INFINITY.is_infinite()); + assert!(!Single::ZERO.is_infinite()); + assert!(!Single::NAN.is_infinite()); + assert!(!Single::snan(None).is_infinite()); + assert!(!"0x1p-149".parse::().unwrap().is_infinite()); +} + +#[test] +fn is_nan() { + let t = "0x1p+0".parse::().unwrap(); + assert!(!t.is_nan()); + assert!(!Single::INFINITY.is_nan()); + assert!(!Single::ZERO.is_nan()); + assert!(Single::NAN.is_nan()); + assert!(Single::snan(None).is_nan()); + assert!(!"0x1p-149".parse::().unwrap().is_nan()); +} + +#[test] +fn is_finite_non_zero() { + // Test positive/negative normal value. + assert!("0x1p+0".parse::().unwrap().is_finite_non_zero()); + assert!("-0x1p+0".parse::().unwrap().is_finite_non_zero()); + + // Test positive/negative denormal value. + assert!("0x1p-149".parse::().unwrap().is_finite_non_zero()); + assert!("-0x1p-149".parse::().unwrap().is_finite_non_zero()); + + // Test +/- Infinity. + assert!(!Single::INFINITY.is_finite_non_zero()); + assert!(!(-Single::INFINITY).is_finite_non_zero()); + + // Test +/- Zero. 
+ assert!(!Single::ZERO.is_finite_non_zero()); + assert!(!(-Single::ZERO).is_finite_non_zero()); + + // Test +/- qNaN. +/- dont mean anything with qNaN but paranoia can't hurt in + // this instance. + assert!(!Single::NAN.is_finite_non_zero()); + assert!(!(-Single::NAN).is_finite_non_zero()); + + // Test +/- sNaN. +/- dont mean anything with sNaN but paranoia can't hurt in + // this instance. + assert!(!Single::snan(None).is_finite_non_zero()); + assert!(!(-Single::snan(None)).is_finite_non_zero()); +} + +#[test] +fn add() { + // Test Special Cases against each other and normal values. + + // FIXMES/NOTES: + // 1. Since we perform only default exception handling all operations with + // signaling NaNs should have a result that is a quiet NaN. Currently they + // return sNaN. + + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let qnan = Single::NAN; + let p_normal_value = "0x1p+0".parse::().unwrap(); + let m_normal_value = "-0x1p+0".parse::().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + let overflow_status = Status::OVERFLOW | Status::INEXACT; + + let special_cases = [ + (p_inf, p_inf, "inf", Status::OK, Category::Infinity), + (p_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, p_zero, "inf", Status::OK, Category::Infinity), + (p_inf, m_zero, "inf", Status::OK, Category::Infinity), + (p_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity), + (p_inf, m_normal_value, "inf", Status::OK, Category::Infinity), + ( + p_inf, + p_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + (m_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, m_inf, "-inf", Status::OK, Category::Infinity), + (m_inf, p_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, m_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_inf, + p_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + (p_zero, p_inf, "inf", Status::OK, Category::Infinity), + (p_zero, m_inf, "-inf", Status::OK, Category::Infinity), + (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, m_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_zero, + p_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + (m_zero, p_inf, "inf", Status::OK, Category::Infinity), + (m_zero, m_inf, "-inf", Status::OK, Category::Infinity), + (m_zero, p_zero, "0x0p+0", Status::OK, Category::Zero), + (m_zero, m_zero, "-0x0p+0", Status::OK, Category::Zero), + (m_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_zero, + p_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + (qnan, p_inf, "nan", Status::OK, Category::NaN), + (qnan, m_inf, "nan", Status::OK, Category::NaN), + (qnan, p_zero, "nan", Status::OK, Category::NaN), + (qnan, m_zero, "nan", Status::OK, Category::NaN), + (qnan, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. +(qnan, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (qnan, p_normal_value, "nan", Status::OK, Category::NaN), + (qnan, m_normal_value, "nan", Status::OK, Category::NaN), + (qnan, p_largest_value, "nan", Status::OK, Category::NaN), + (qnan, m_largest_value, "nan", Status::OK, Category::NaN), + (qnan, p_smallest_value, "nan", Status::OK, Category::NaN), + (qnan, m_smallest_value, "nan", Status::OK, Category::NaN), + ( + qnan, + p_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + ( + qnan, + m_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, qnan, "nan", Status::INVALID_OP, Category::NaN), +(snan, snan, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_normal_value, p_inf, "inf", Status::OK, Category::Infinity), + ( + p_normal_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_normal_value, + p_zero, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_zero, + "0x1p+0", + Status::OK, + Category::Normal, + ), + (p_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_normal_value, + "0x1p+1", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_normal_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_normal_value, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_normalized, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_normalized, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + (m_normal_value, p_inf, "inf", Status::OK, Category::Infinity), + ( + m_normal_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_normal_value, + p_zero, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_zero, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + (m_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_normal_value, + p_normal_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_normal_value, + m_normal_value, + "-0x1p+1", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_normalized, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_normalized, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + p_zero, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_zero, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + (p_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_largest_value, + p_normal_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_normal_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_largest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + m_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_largest_value, + p_smallest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_smallest_normalized, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_normalized, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + p_zero, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_zero, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + (m_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_largest_value, + p_normal_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_normal_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_largest_value, + m_largest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + p_smallest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_smallest_normalized, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_normalized, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + p_zero, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_zero, + "0x1p-149", + Status::OK, + Category::Normal, + ), + (p_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_value, + p_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + m_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_value, + "0x1p-148", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + p_smallest_normalized, + "0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_normalized, + "-0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + p_zero, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_zero, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + (m_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_value, + p_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + m_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + m_smallest_value, + "-0x1p-148", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_normalized, + "0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_normalized, + "-0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + p_zero, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_zero, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_normalized, + p_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + m_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_value, + "0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_value, + "0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_normalized, + "0x1p-125", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + p_zero, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_zero, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_normalized, + p_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + m_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + p_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + m_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_value, + "-0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_value, + "-0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + m_smallest_normalized, + "-0x1p-125", + Status::OK, + Category::Normal, + ), + ]; + + for &(x, y, e_result, e_status, e_category) in &special_cases[..] { + let status; + let result = unpack!(status=, x + y); + assert_eq!(status, e_status); + assert_eq!(result.category(), e_category); + assert!(result.bitwise_eq(e_result.parse::().unwrap())); + } +} + +#[test] +fn subtract() { + // Test Special Cases against each other and normal values. + + // FIXMES/NOTES: + // 1. Since we perform only default exception handling all operations with + // signaling NaNs should have a result that is a quiet NaN. Currently they + // return sNaN. 
+ + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let qnan = Single::NAN; + let p_normal_value = "0x1p+0".parse::().unwrap(); + let m_normal_value = "-0x1p+0".parse::().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + let overflow_status = Status::OVERFLOW | Status::INEXACT; + + let special_cases = [ + (p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, m_inf, "inf", Status::OK, Category::Infinity), + (p_inf, p_zero, "inf", Status::OK, Category::Infinity), + (p_inf, m_zero, "inf", Status::OK, Category::Infinity), + (p_inf, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. +(p_inf, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity), + (p_inf, m_normal_value, "inf", Status::OK, Category::Infinity), + ( + p_inf, + p_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + (m_inf, p_inf, "-inf", Status::OK, Category::Infinity), + (m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, p_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, m_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_inf, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_inf, + p_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + (p_zero, p_inf, "-inf", Status::OK, Category::Infinity), + (p_zero, m_inf, "inf", Status::OK, Category::Infinity), + (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, m_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_zero, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_zero, + p_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_zero, + p_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_zero, + m_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + (m_zero, p_inf, "-inf", Status::OK, Category::Infinity), + (m_zero, m_inf, "inf", Status::OK, Category::Infinity), + (m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero), + (m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero), + (m_zero, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_zero, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_zero, + p_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_zero, + p_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_zero, + m_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + (qnan, p_inf, "nan", Status::OK, Category::NaN), + (qnan, m_inf, "nan", Status::OK, Category::NaN), + (qnan, p_zero, "nan", Status::OK, Category::NaN), + (qnan, m_zero, "nan", Status::OK, Category::NaN), + (qnan, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. +(qnan, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (qnan, p_normal_value, "nan", Status::OK, Category::NaN), + (qnan, m_normal_value, "nan", Status::OK, Category::NaN), + (qnan, p_largest_value, "nan", Status::OK, Category::NaN), + (qnan, m_largest_value, "nan", Status::OK, Category::NaN), + (qnan, p_smallest_value, "nan", Status::OK, Category::NaN), + (qnan, m_smallest_value, "nan", Status::OK, Category::NaN), + ( + qnan, + p_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + ( + qnan, + m_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, qnan, "nan", Status::INVALID_OP, Category::NaN), +(snan, snan, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + (p_normal_value, m_inf, "inf", Status::OK, Category::Infinity), + ( + p_normal_value, + p_zero, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_zero, + "0x1p+0", + Status::OK, + Category::Normal, + ), + (p_normal_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_normal_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_normal_value, + m_normal_value, + "0x1p+1", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_normalized, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_normalized, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_normal_value, m_inf, "inf", Status::OK, Category::Infinity), + ( + m_normal_value, + p_zero, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_zero, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + (m_normal_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_normal_value, + p_normal_value, + "-0x1p+1", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_normal_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_normal_value, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_normalized, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_normalized, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + p_zero, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_zero, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + (p_largest_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_largest_value, + p_normal_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_normal_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_largest_value, + m_largest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + p_smallest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + p_smallest_normalized, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_normalized, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + p_zero, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_zero, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + (m_largest_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_largest_value, + p_normal_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_normal_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_largest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + m_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_largest_value, + p_smallest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + p_smallest_normalized, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_normalized, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + p_zero, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_zero, + "0x1p-149", + Status::OK, + Category::Normal, + ), + (p_smallest_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_value, + p_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + m_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + m_smallest_value, + "0x1p-148", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_normalized, + "-0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_normalized, + "0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + p_zero, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_zero, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + (m_smallest_value, qnan, "-nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_value, + p_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + m_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_value, + "-0x1p-148", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + p_smallest_normalized, + "-0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_normalized, + "0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + p_zero, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_zero, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + qnan, + "-nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(p_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_normalized, + p_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + m_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_value, + "0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_value, + "0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + m_smallest_normalized, + "0x1p-125", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + p_zero, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_zero, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + qnan, + "-nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(m_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_normalized, + p_normal_value, + "-0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + m_normal_value, + "0x1p+0", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + p_largest_value, + "-0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + m_largest_value, + "0x1.fffffep+127", + Status::INEXACT, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_value, + "-0x1.000002p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_value, + "-0x1.fffffcp-127", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_normalized, + "-0x1p-125", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ]; + + for &(x, y, e_result, e_status, e_category) in &special_cases[..] { + let status; + let result = unpack!(status=, x - y); + assert_eq!(status, e_status); + assert_eq!(result.category(), e_category); + assert!(result.bitwise_eq(e_result.parse::().unwrap())); + } +} + +#[test] +fn multiply() { + // Test Special Cases against each other and normal values. + + // FIXMES/NOTES: + // 1. Since we perform only default exception handling all operations with + // signaling NaNs should have a result that is a quiet NaN. Currently they + // return sNaN. 
+ + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let qnan = Single::NAN; + let p_normal_value = "0x1p+0".parse::().unwrap(); + let m_normal_value = "-0x1p+0".parse::().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + let overflow_status = Status::OVERFLOW | Status::INEXACT; + let underflow_status = Status::UNDERFLOW | Status::INEXACT; + + let special_cases = [ + (p_inf, p_inf, "inf", Status::OK, Category::Infinity), + (p_inf, m_inf, "-inf", Status::OK, Category::Infinity), + (p_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity), + ( + p_inf, + m_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_inf, p_inf, "-inf", Status::OK, Category::Infinity), + (m_inf, m_inf, "inf", Status::OK, Category::Infinity), + (m_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_inf, + p_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_inf, m_normal_value, "inf", Status::OK, Category::Infinity), + ( + m_inf, + p_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + (p_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero), + (p_zero, m_zero, "-0x0p+0", Status::OK, Category::Zero), + (p_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero), + ( + p_zero, + m_normal_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_largest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_smallest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_smallest_normalized, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero), + (m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero), + (m_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_zero, + p_normal_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero), + ( + m_zero, + p_largest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + p_smallest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + p_smallest_normalized, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + (qnan, p_inf, "nan", Status::OK, Category::NaN), + (qnan, m_inf, "nan", Status::OK, Category::NaN), + (qnan, p_zero, "nan", Status::OK, Category::NaN), + (qnan, m_zero, "nan", Status::OK, Category::NaN), + (qnan, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. +(qnan, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (qnan, p_normal_value, "nan", Status::OK, Category::NaN), + (qnan, m_normal_value, "nan", Status::OK, Category::NaN), + (qnan, p_largest_value, "nan", Status::OK, Category::NaN), + (qnan, m_largest_value, "nan", Status::OK, Category::NaN), + (qnan, p_smallest_value, "nan", Status::OK, Category::NaN), + (qnan, m_smallest_value, "nan", Status::OK, Category::NaN), + ( + qnan, + p_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + ( + qnan, + m_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, qnan, "nan", Status::INVALID_OP, Category::NaN), +(snan, snan, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_normal_value, p_inf, "inf", Status::OK, Category::Infinity), + ( + p_normal_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + (p_normal_value, p_zero, "0x0p+0", Status::OK, Category::Zero), + ( + p_normal_value, + m_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (p_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_normal_value, m_inf, "inf", Status::OK, Category::Infinity), + ( + m_normal_value, + p_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_normal_value, m_zero, "0x0p+0", Status::OK, Category::Zero), + (m_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_normal_value, + p_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_largest_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_largest_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_normalized, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_normalized, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_largest_value, + p_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_largest_value, + m_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (p_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_largest_value, + p_normal_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_normal_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_largest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + m_largest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + p_smallest_value, + "0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_value, + "-0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_smallest_normalized, + "0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_smallest_normalized, + "-0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_largest_value, + p_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_largest_value, + m_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + (m_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_largest_value, + p_normal_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_normal_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_largest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + m_largest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + p_smallest_value, + "-0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_value, + "0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_smallest_normalized, + "-0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_smallest_normalized, + "0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_value, + p_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + m_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (p_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_value, + p_normal_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_normal_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_largest_value, + "0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_largest_value, + "-0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + m_smallest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + p_smallest_normalized, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + m_smallest_normalized, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_value, + p_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + m_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + (m_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_value, + p_normal_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_normal_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_largest_value, + "-0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_largest_value, + "0x1.fffffep-22", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + m_smallest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + p_smallest_normalized, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + m_smallest_normalized, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + p_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + m_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_smallest_normalized, + p_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + m_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_normalized, + p_normal_value, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_normal_value, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_largest_value, + "0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_largest_value, + "-0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + m_smallest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + p_smallest_normalized, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + m_smallest_normalized, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + p_inf, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + m_inf, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_smallest_normalized, + p_zero, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + m_zero, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_normalized, + p_normal_value, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_normal_value, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_largest_value, + "-0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_largest_value, + "0x1.fffffep+1", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + m_smallest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + p_smallest_normalized, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + m_smallest_normalized, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ]; + + for &(x, y, e_result, e_status, e_category) in &special_cases[..] { + let status; + let result = unpack!(status=, x * y); + assert_eq!(status, e_status); + assert_eq!(result.category(), e_category); + assert!(result.bitwise_eq(e_result.parse::().unwrap())); + } +} + +#[test] +fn divide() { + // Test Special Cases against each other and normal values. + + // FIXMES/NOTES: + // 1. Since we perform only default exception handling all operations with + // signaling NaNs should have a result that is a quiet NaN. Currently they + // return sNaN. 
+ + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let qnan = Single::NAN; + let p_normal_value = "0x1p+0".parse::().unwrap(); + let m_normal_value = "-0x1p+0".parse::().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + let overflow_status = Status::OVERFLOW | Status::INEXACT; + let underflow_status = Status::UNDERFLOW | Status::INEXACT; + + let special_cases = [ + (p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (p_inf, p_zero, "inf", Status::OK, Category::Infinity), + (p_inf, m_zero, "-inf", Status::OK, Category::Infinity), + (p_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity), + ( + p_inf, + m_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + p_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + ( + p_inf, + m_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN), + (m_inf, p_zero, "-inf", Status::OK, Category::Infinity), + (m_inf, m_zero, "inf", Status::OK, Category::Infinity), + (m_inf, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_inf, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_inf, + p_normal_value, + "-inf", + Status::OK, + Category::Infinity, + ), + (m_inf, m_normal_value, "inf", Status::OK, Category::Infinity), + ( + m_inf, + p_largest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_largest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_value, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_value, + "inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + p_smallest_normalized, + "-inf", + Status::OK, + Category::Infinity, + ), + ( + m_inf, + m_smallest_normalized, + "inf", + Status::OK, + Category::Infinity, + ), + (p_zero, p_inf, "0x0p+0", Status::OK, Category::Zero), + (p_zero, m_inf, "-0x0p+0", Status::OK, Category::Zero), + (p_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN), + (p_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN), + (p_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero), + ( + p_zero, + m_normal_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_largest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_smallest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + p_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_zero, + m_smallest_normalized, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_zero, p_inf, "-0x0p+0", Status::OK, Category::Zero), + (m_zero, m_inf, "0x0p+0", Status::OK, Category::Zero), + (m_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN), + (m_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN), + (m_zero, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_zero, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_zero, + p_normal_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero), + ( + m_zero, + p_largest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_largest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + p_smallest_value, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_smallest_value, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + p_smallest_normalized, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_zero, + m_smallest_normalized, + "0x0p+0", + Status::OK, + Category::Zero, + ), + (qnan, p_inf, "nan", Status::OK, Category::NaN), + (qnan, m_inf, "nan", Status::OK, Category::NaN), + (qnan, p_zero, "nan", Status::OK, Category::NaN), + (qnan, m_zero, "nan", Status::OK, Category::NaN), + (qnan, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. +(qnan, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + (qnan, p_normal_value, "nan", Status::OK, Category::NaN), + (qnan, m_normal_value, "nan", Status::OK, Category::NaN), + (qnan, p_largest_value, "nan", Status::OK, Category::NaN), + (qnan, m_largest_value, "nan", Status::OK, Category::NaN), + (qnan, p_smallest_value, "nan", Status::OK, Category::NaN), + (qnan, m_smallest_value, "nan", Status::OK, Category::NaN), + ( + qnan, + p_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + ( + qnan, + m_smallest_normalized, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN), +(snan, qnan, "nan", Status::INVALID_OP, Category::NaN), +(snan, snan, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN), +(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), +(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN), + */ + (p_normal_value, p_inf, "0x0p+0", Status::OK, Category::Zero), + (p_normal_value, m_inf, "-0x0p+0", Status::OK, Category::Zero), + ( + p_normal_value, + p_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_normal_value, + m_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (p_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_normal_value, + p_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + p_largest_value, + "0x1p-128", + underflow_status, + Category::Normal, + ), + ( + p_normal_value, + m_largest_value, + "-0x1p-128", + underflow_status, + Category::Normal, + ), + ( + p_normal_value, + p_smallest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_normal_value, + m_smallest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + p_normal_value, + p_smallest_normalized, + "0x1p+126", + Status::OK, + Category::Normal, + ), + ( + p_normal_value, + m_smallest_normalized, + "-0x1p+126", + Status::OK, + Category::Normal, + ), + (m_normal_value, p_inf, "-0x0p+0", Status::OK, Category::Zero), + (m_normal_value, m_inf, "0x0p+0", Status::OK, Category::Zero), + ( + m_normal_value, + p_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_normal_value, + m_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (m_normal_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_normal_value, + p_normal_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_normal_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + p_largest_value, + "-0x1p-128", + underflow_status, + Category::Normal, + ), + ( + m_normal_value, + m_largest_value, + "0x1p-128", + underflow_status, + Category::Normal, + ), + ( + m_normal_value, + p_smallest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_normal_value, + m_smallest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + m_normal_value, + p_smallest_normalized, + "-0x1p+126", + Status::OK, + Category::Normal, + ), + ( + m_normal_value, + m_smallest_normalized, + "0x1p+126", + Status::OK, + Category::Normal, + ), + (p_largest_value, p_inf, "0x0p+0", Status::OK, Category::Zero), + ( + p_largest_value, + m_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_largest_value, + p_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_largest_value, + m_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (p_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_largest_value, + p_normal_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_normal_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_largest_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + m_largest_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_largest_value, + p_smallest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + m_smallest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + p_smallest_normalized, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_largest_value, + m_smallest_normalized, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + p_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + (m_largest_value, m_inf, "0x0p+0", Status::OK, Category::Zero), + ( + m_largest_value, + p_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_largest_value, + m_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (m_largest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_largest_value, + p_normal_value, + "-0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_normal_value, + "0x1.fffffep+127", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_largest_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + m_largest_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_largest_value, + p_smallest_value, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + m_smallest_value, + "inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + p_smallest_normalized, + "-inf", + overflow_status, + Category::Infinity, + ), + ( + m_largest_value, + m_smallest_normalized, + "inf", + overflow_status, + Category::Infinity, + ), + ( + p_smallest_value, + p_inf, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + m_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_value, + p_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_smallest_value, + m_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (p_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_value, + p_normal_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_normal_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_largest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + m_largest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_value, + p_smallest_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + p_smallest_normalized, + "0x1p-23", + Status::OK, + Category::Normal, + ), + ( + p_smallest_value, + m_smallest_normalized, + "-0x1p-23", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + m_inf, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_value, + p_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_smallest_value, + m_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + (m_smallest_value, qnan, "nan", Status::OK, Category::NaN), + /* +// See Note 1. 
+(m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_value, + p_normal_value, + "-0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_normal_value, + "0x1p-149", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_largest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + m_largest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_value, + p_smallest_value, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_value, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + p_smallest_normalized, + "-0x1p-23", + Status::OK, + Category::Normal, + ), + ( + m_smallest_value, + m_smallest_normalized, + "0x1p-23", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_inf, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + m_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + p_smallest_normalized, + p_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_smallest_normalized, + m_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + p_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + p_smallest_normalized, + p_normal_value, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_normal_value, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_largest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + m_largest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + p_smallest_normalized, + p_smallest_value, + "0x1p+23", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_value, + "-0x1p+23", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + p_smallest_normalized, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ( + p_smallest_normalized, + m_smallest_normalized, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_inf, + "-0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + m_inf, + "0x0p+0", + Status::OK, + Category::Zero, + ), + ( + m_smallest_normalized, + p_zero, + "-inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_smallest_normalized, + m_zero, + "inf", + Status::DIV_BY_ZERO, + Category::Infinity, + ), + ( + m_smallest_normalized, + qnan, + "nan", + Status::OK, + Category::NaN, + ), + /* +// See Note 1. 
+(m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN), + */ + ( + m_smallest_normalized, + p_normal_value, + "-0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_normal_value, + "0x1p-126", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_largest_value, + "-0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + m_largest_value, + "0x0p+0", + underflow_status, + Category::Zero, + ), + ( + m_smallest_normalized, + p_smallest_value, + "-0x1p+23", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_value, + "0x1p+23", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + p_smallest_normalized, + "-0x1p+0", + Status::OK, + Category::Normal, + ), + ( + m_smallest_normalized, + m_smallest_normalized, + "0x1p+0", + Status::OK, + Category::Normal, + ), + ]; + + for &(x, y, e_result, e_status, e_category) in &special_cases[..] { + let status; + let result = unpack!(status=, x / y); + assert_eq!(status, e_status); + assert_eq!(result.category(), e_category); + assert!(result.bitwise_eq(e_result.parse::().unwrap())); + } +} + +#[test] +fn operator_overloads() { + // This is mostly testing that these operator overloads compile. 
+ let one = "0x1p+0".parse::().unwrap(); + let two = "0x2p+0".parse::().unwrap(); + assert!(two.bitwise_eq((one + one).value)); + assert!(one.bitwise_eq((two - one).value)); + assert!(two.bitwise_eq((one * two).value)); + assert!(one.bitwise_eq((two / two).value)); +} + +#[test] +fn abs() { + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let p_qnan = Single::NAN; + let m_qnan = -Single::NAN; + let p_snan = Single::snan(None); + let m_snan = -Single::snan(None); + let p_normal_value = "0x1p+0".parse::().unwrap(); + let m_normal_value = "-0x1p+0".parse::().unwrap(); + let p_largest_value = Single::largest(); + let m_largest_value = -Single::largest(); + let p_smallest_value = Single::SMALLEST; + let m_smallest_value = -Single::SMALLEST; + let p_smallest_normalized = Single::smallest_normalized(); + let m_smallest_normalized = -Single::smallest_normalized(); + + assert!(p_inf.bitwise_eq(p_inf.abs())); + assert!(p_inf.bitwise_eq(m_inf.abs())); + assert!(p_zero.bitwise_eq(p_zero.abs())); + assert!(p_zero.bitwise_eq(m_zero.abs())); + assert!(p_qnan.bitwise_eq(p_qnan.abs())); + assert!(p_qnan.bitwise_eq(m_qnan.abs())); + assert!(p_snan.bitwise_eq(p_snan.abs())); + assert!(p_snan.bitwise_eq(m_snan.abs())); + assert!(p_normal_value.bitwise_eq(p_normal_value.abs())); + assert!(p_normal_value.bitwise_eq(m_normal_value.abs())); + assert!(p_largest_value.bitwise_eq(p_largest_value.abs())); + assert!(p_largest_value.bitwise_eq(m_largest_value.abs())); + assert!(p_smallest_value.bitwise_eq(p_smallest_value.abs())); + assert!(p_smallest_value.bitwise_eq(m_smallest_value.abs())); + assert!(p_smallest_normalized.bitwise_eq( + p_smallest_normalized.abs(), + )); + assert!(p_smallest_normalized.bitwise_eq( + m_smallest_normalized.abs(), + )); +} + +#[test] +fn neg() { + let one = "1.0".parse::().unwrap(); + let neg_one = "-1.0".parse::().unwrap(); + let zero = Single::ZERO; + let neg_zero = -Single::ZERO; + 
let inf = Single::INFINITY; + let neg_inf = -Single::INFINITY; + let qnan = Single::NAN; + let neg_qnan = -Single::NAN; + + assert!(neg_one.bitwise_eq(-one)); + assert!(one.bitwise_eq(-neg_one)); + assert!(neg_zero.bitwise_eq(-zero)); + assert!(zero.bitwise_eq(-neg_zero)); + assert!(neg_inf.bitwise_eq(-inf)); + assert!(inf.bitwise_eq(-neg_inf)); + assert!(neg_inf.bitwise_eq(-inf)); + assert!(inf.bitwise_eq(-neg_inf)); + assert!(neg_qnan.bitwise_eq(-qnan)); + assert!(qnan.bitwise_eq(-neg_qnan)); +} + +#[test] +fn ilogb() { + assert_eq!(-1074, Double::SMALLEST.ilogb()); + assert_eq!(-1074, (-Double::SMALLEST).ilogb()); + assert_eq!( + -1023, + "0x1.ffffffffffffep-1024".parse::().unwrap().ilogb() + ); + assert_eq!( + -1023, + "0x1.ffffffffffffep-1023".parse::().unwrap().ilogb() + ); + assert_eq!( + -1023, + "-0x1.ffffffffffffep-1023" + .parse::() + .unwrap() + .ilogb() + ); + assert_eq!(-51, "0x1p-51".parse::().unwrap().ilogb()); + assert_eq!( + -1023, + "0x1.c60f120d9f87cp-1023".parse::().unwrap().ilogb() + ); + assert_eq!(-2, "0x0.ffffp-1".parse::().unwrap().ilogb()); + assert_eq!(-1023, "0x1.fffep-1023".parse::().unwrap().ilogb()); + assert_eq!(1023, Double::largest().ilogb()); + assert_eq!(1023, (-Double::largest()).ilogb()); + + + assert_eq!(0, "0x1p+0".parse::().unwrap().ilogb()); + assert_eq!(0, "-0x1p+0".parse::().unwrap().ilogb()); + assert_eq!(42, "0x1p+42".parse::().unwrap().ilogb()); + assert_eq!(-42, "0x1p-42".parse::().unwrap().ilogb()); + + assert_eq!(IEK_INF, Single::INFINITY.ilogb()); + assert_eq!(IEK_INF, (-Single::INFINITY).ilogb()); + assert_eq!(IEK_ZERO, Single::ZERO.ilogb()); + assert_eq!(IEK_ZERO, (-Single::ZERO).ilogb()); + assert_eq!(IEK_NAN, Single::NAN.ilogb()); + assert_eq!(IEK_NAN, Single::snan(None).ilogb()); + + assert_eq!(127, Single::largest().ilogb()); + assert_eq!(127, (-Single::largest()).ilogb()); + + assert_eq!(-149, Single::SMALLEST.ilogb()); + assert_eq!(-149, (-Single::SMALLEST).ilogb()); + assert_eq!(-126, 
Single::smallest_normalized().ilogb()); + assert_eq!(-126, (-Single::smallest_normalized()).ilogb()); +} + +#[test] +fn scalbn() { + assert!("0x1p+0".parse::().unwrap().bitwise_eq( + "0x1p+0".parse::().unwrap().scalbn(0), + )); + assert!("0x1p+42".parse::().unwrap().bitwise_eq( + "0x1p+0".parse::().unwrap().scalbn(42), + )); + assert!("0x1p-42".parse::().unwrap().bitwise_eq( + "0x1p+0".parse::().unwrap().scalbn(-42), + )); + + let p_inf = Single::INFINITY; + let m_inf = -Single::INFINITY; + let p_zero = Single::ZERO; + let m_zero = -Single::ZERO; + let p_qnan = Single::NAN; + let m_qnan = -Single::NAN; + let snan = Single::snan(None); + + assert!(p_inf.bitwise_eq(p_inf.scalbn(0))); + assert!(m_inf.bitwise_eq(m_inf.scalbn(0))); + assert!(p_zero.bitwise_eq(p_zero.scalbn(0))); + assert!(m_zero.bitwise_eq(m_zero.scalbn(0))); + assert!(p_qnan.bitwise_eq(p_qnan.scalbn(0))); + assert!(m_qnan.bitwise_eq(m_qnan.scalbn(0))); + assert!(!snan.scalbn(0).is_signaling()); + + let scalbn_snan = snan.scalbn(1); + assert!(scalbn_snan.is_nan() && !scalbn_snan.is_signaling()); + + // Make sure highest bit of payload is preserved. 
+ let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1; + + let snan_with_payload = Double::snan(Some(payload)); + let quiet_payload = snan_with_payload.scalbn(1); + assert!(quiet_payload.is_nan() && !quiet_payload.is_signaling()); + assert_eq!(payload, quiet_payload.to_bits() & ((1 << 51) - 1)); + + assert!(p_inf.bitwise_eq( + "0x1p+0".parse::().unwrap().scalbn(128), + )); + assert!(m_inf.bitwise_eq( + "-0x1p+0".parse::().unwrap().scalbn(128), + )); + assert!(p_inf.bitwise_eq( + "0x1p+127".parse::().unwrap().scalbn(1), + )); + assert!(p_zero.bitwise_eq( + "0x1p-127".parse::().unwrap().scalbn(-127), + )); + assert!(m_zero.bitwise_eq( + "-0x1p-127".parse::().unwrap().scalbn(-127), + )); + assert!("-0x1p-149".parse::().unwrap().bitwise_eq( + "-0x1p-127".parse::().unwrap().scalbn(-22), + )); + assert!(p_zero.bitwise_eq( + "0x1p-126".parse::().unwrap().scalbn(-24), + )); + + + let smallest_f64 = Double::SMALLEST; + let neg_smallest_f64 = -Double::SMALLEST; + + let largest_f64 = Double::largest(); + let neg_largest_f64 = -Double::largest(); + + let largest_denormal_f64 = "0x1.ffffffffffffep-1023".parse::().unwrap(); + let neg_largest_denormal_f64 = "-0x1.ffffffffffffep-1023".parse::().unwrap(); + + + assert!(smallest_f64.bitwise_eq( + "0x1p-1074".parse::().unwrap().scalbn(0), + )); + assert!(neg_smallest_f64.bitwise_eq( + "-0x1p-1074".parse::().unwrap().scalbn(0), + )); + + assert!("0x1p+1023".parse::().unwrap().bitwise_eq( + smallest_f64.scalbn( + 2097, + ), + )); + + assert!(smallest_f64.scalbn(-2097).is_pos_zero()); + assert!(smallest_f64.scalbn(-2098).is_pos_zero()); + assert!(smallest_f64.scalbn(-2099).is_pos_zero()); + assert!("0x1p+1022".parse::().unwrap().bitwise_eq( + smallest_f64.scalbn( + 2096, + ), + )); + assert!("0x1p+1023".parse::().unwrap().bitwise_eq( + smallest_f64.scalbn( + 2097, + ), + )); + assert!(smallest_f64.scalbn(2098).is_infinite()); + assert!(smallest_f64.scalbn(2099).is_infinite()); + + // Test for integer overflows when adding to 
exponent. + assert!(smallest_f64.scalbn(-ExpInt::max_value()).is_pos_zero()); + assert!(largest_f64.scalbn(ExpInt::max_value()).is_infinite()); + + assert!(largest_denormal_f64.bitwise_eq( + largest_denormal_f64.scalbn(0), + )); + assert!(neg_largest_denormal_f64.bitwise_eq( + neg_largest_denormal_f64.scalbn(0), + )); + + assert!( + "0x1.ffffffffffffep-1022" + .parse::() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1)) + ); + assert!( + "-0x1.ffffffffffffep-1021" + .parse::() + .unwrap() + .bitwise_eq(neg_largest_denormal_f64.scalbn(2)) + ); + + assert!( + "0x1.ffffffffffffep+1" + .parse::() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1024)) + ); + assert!(largest_denormal_f64.scalbn(-1023).is_pos_zero()); + assert!(largest_denormal_f64.scalbn(-1024).is_pos_zero()); + assert!(largest_denormal_f64.scalbn(-2048).is_pos_zero()); + assert!(largest_denormal_f64.scalbn(2047).is_infinite()); + assert!(largest_denormal_f64.scalbn(2098).is_infinite()); + assert!(largest_denormal_f64.scalbn(2099).is_infinite()); + + assert!( + "0x1.ffffffffffffep-2" + .parse::() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1021)) + ); + assert!( + "0x1.ffffffffffffep-1" + .parse::() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1022)) + ); + assert!( + "0x1.ffffffffffffep+0" + .parse::() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(1023)) + ); + assert!( + "0x1.ffffffffffffep+1023" + .parse::() + .unwrap() + .bitwise_eq(largest_denormal_f64.scalbn(2046)) + ); + assert!("0x1p+974".parse::().unwrap().bitwise_eq( + smallest_f64.scalbn( + 2048, + ), + )); + + let random_denormal_f64 = "0x1.c60f120d9f87cp+51".parse::().unwrap(); + assert!( + "0x1.c60f120d9f87cp-972" + .parse::() + .unwrap() + .bitwise_eq(random_denormal_f64.scalbn(-1023)) + ); + assert!( + "0x1.c60f120d9f87cp-1" + .parse::() + .unwrap() + .bitwise_eq(random_denormal_f64.scalbn(-52)) + ); + assert!( + "0x1.c60f120d9f87cp-2" + .parse::() + .unwrap() + 
.bitwise_eq(random_denormal_f64.scalbn(-53)) + ); + assert!( + "0x1.c60f120d9f87cp+0" + .parse::() + .unwrap() + .bitwise_eq(random_denormal_f64.scalbn(-51)) + ); + + assert!(random_denormal_f64.scalbn(-2097).is_pos_zero()); + assert!(random_denormal_f64.scalbn(-2090).is_pos_zero()); + + + assert!("-0x1p-1073".parse::().unwrap().bitwise_eq( + neg_largest_f64.scalbn(-2097), + )); + + assert!("-0x1p-1024".parse::().unwrap().bitwise_eq( + neg_largest_f64.scalbn(-2048), + )); + + assert!("0x1p-1073".parse::().unwrap().bitwise_eq( + largest_f64.scalbn( + -2097, + ), + )); + + assert!("0x1p-1074".parse::().unwrap().bitwise_eq( + largest_f64.scalbn( + -2098, + ), + )); + assert!("-0x1p-1074".parse::().unwrap().bitwise_eq( + neg_largest_f64.scalbn(-2098), + )); + assert!(neg_largest_f64.scalbn(-2099).is_neg_zero()); + assert!(largest_f64.scalbn(1).is_infinite()); + + + assert!("0x1p+0".parse::().unwrap().bitwise_eq( + "0x1p+52".parse::().unwrap().scalbn(-52), + )); + + assert!("0x1p-103".parse::().unwrap().bitwise_eq( + "0x1p-51".parse::().unwrap().scalbn(-52), + )); +} + +#[test] +fn frexp() { + let p_zero = Double::ZERO; + let m_zero = -Double::ZERO; + let one = Double::from_f64(1.0); + let m_one = Double::from_f64(-1.0); + + let largest_denormal = "0x1.ffffffffffffep-1023".parse::().unwrap(); + let neg_largest_denormal = "-0x1.ffffffffffffep-1023".parse::().unwrap(); + + let smallest = Double::SMALLEST; + let neg_smallest = -Double::SMALLEST; + + let largest = Double::largest(); + let neg_largest = -Double::largest(); + + let p_inf = Double::INFINITY; + let m_inf = -Double::INFINITY; + + let p_qnan = Double::NAN; + let m_qnan = -Double::NAN; + let snan = Double::snan(None); + + // Make sure highest bit of payload is preserved. 
+ let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1; + + let snan_with_payload = Double::snan(Some(payload)); + + let mut exp = 0; + + let frac = p_zero.frexp(&mut exp); + assert_eq!(0, exp); + assert!(frac.is_pos_zero()); + + let frac = m_zero.frexp(&mut exp); + assert_eq!(0, exp); + assert!(frac.is_neg_zero()); + + + let frac = one.frexp(&mut exp); + assert_eq!(1, exp); + assert!("0x1p-1".parse::().unwrap().bitwise_eq(frac)); + + let frac = m_one.frexp(&mut exp); + assert_eq!(1, exp); + assert!("-0x1p-1".parse::().unwrap().bitwise_eq(frac)); + + let frac = largest_denormal.frexp(&mut exp); + assert_eq!(-1022, exp); + assert!( + "0x1.ffffffffffffep-1" + .parse::() + .unwrap() + .bitwise_eq(frac) + ); + + let frac = neg_largest_denormal.frexp(&mut exp); + assert_eq!(-1022, exp); + assert!( + "-0x1.ffffffffffffep-1" + .parse::() + .unwrap() + .bitwise_eq(frac) + ); + + + let frac = smallest.frexp(&mut exp); + assert_eq!(-1073, exp); + assert!("0x1p-1".parse::().unwrap().bitwise_eq(frac)); + + let frac = neg_smallest.frexp(&mut exp); + assert_eq!(-1073, exp); + assert!("-0x1p-1".parse::().unwrap().bitwise_eq(frac)); + + + let frac = largest.frexp(&mut exp); + assert_eq!(1024, exp); + assert!( + "0x1.fffffffffffffp-1" + .parse::() + .unwrap() + .bitwise_eq(frac) + ); + + let frac = neg_largest.frexp(&mut exp); + assert_eq!(1024, exp); + assert!( + "-0x1.fffffffffffffp-1" + .parse::() + .unwrap() + .bitwise_eq(frac) + ); + + + let frac = p_inf.frexp(&mut exp); + assert_eq!(IEK_INF, exp); + assert!(frac.is_infinite() && !frac.is_negative()); + + let frac = m_inf.frexp(&mut exp); + assert_eq!(IEK_INF, exp); + assert!(frac.is_infinite() && frac.is_negative()); + + let frac = p_qnan.frexp(&mut exp); + assert_eq!(IEK_NAN, exp); + assert!(frac.is_nan()); + + let frac = m_qnan.frexp(&mut exp); + assert_eq!(IEK_NAN, exp); + assert!(frac.is_nan()); + + let frac = snan.frexp(&mut exp); + assert_eq!(IEK_NAN, exp); + assert!(frac.is_nan() && !frac.is_signaling()); + + let 
frac = snan_with_payload.frexp(&mut exp); + assert_eq!(IEK_NAN, exp); + assert!(frac.is_nan() && !frac.is_signaling()); + assert_eq!(payload, frac.to_bits() & ((1 << 51) - 1)); + + let frac = "0x0.ffffp-1".parse::().unwrap().frexp(&mut exp); + assert_eq!(-1, exp); + assert!("0x1.fffep-1".parse::().unwrap().bitwise_eq(frac)); + + let frac = "0x1p-51".parse::().unwrap().frexp(&mut exp); + assert_eq!(-50, exp); + assert!("0x1p-1".parse::().unwrap().bitwise_eq(frac)); + + let frac = "0x1.c60f120d9f87cp+51".parse::().unwrap().frexp( + &mut exp, + ); + assert_eq!(52, exp); + assert!( + "0x1.c60f120d9f87cp-1" + .parse::() + .unwrap() + .bitwise_eq(frac) + ); +} + +#[test] +fn modulo() { + let mut status; + { + let f1 = "1.5".parse::().unwrap(); + let f2 = "1.0".parse::().unwrap(); + let expected = "0.5".parse::().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "0.5".parse::().unwrap(); + let f2 = "1.0".parse::().unwrap(); + let expected = "0.5".parse::().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "0x1.3333333333333p-2".parse::().unwrap(); // 0.3 + let f2 = "0x1.47ae147ae147bp-7".parse::().unwrap(); // 0.01 + // 0.009999999999999983 + let expected = "0x1.47ae147ae1471p-7".parse::().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "0x1p64".parse::().unwrap(); // 1.8446744073709552e19 + let f2 = "1.5".parse::().unwrap(); + let expected = "1.0".parse::().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "0x1p1000".parse::().unwrap(); + let f2 = "0x1p-1000".parse::().unwrap(); + let expected = "0.0".parse::().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "0.0".parse::().unwrap(); + let f2 = 
"1.0".parse::().unwrap(); + let expected = "0.0".parse::().unwrap(); + assert!(unpack!(status=, f1 % f2).bitwise_eq(expected)); + assert_eq!(status, Status::OK); + } + { + let f1 = "1.0".parse::().unwrap(); + let f2 = "0.0".parse::().unwrap(); + assert!(unpack!(status=, f1 % f2).is_nan()); + assert_eq!(status, Status::INVALID_OP); + } + { + let f1 = "0.0".parse::().unwrap(); + let f2 = "0.0".parse::().unwrap(); + assert!(unpack!(status=, f1 % f2).is_nan()); + assert_eq!(status, Status::INVALID_OP); + } + { + let f1 = Double::INFINITY; + let f2 = "1.0".parse::().unwrap(); + assert!(unpack!(status=, f1 % f2).is_nan()); + assert_eq!(status, Status::INVALID_OP); + } +} diff --git a/src/librustc_apfloat/tests/ppc.rs b/src/librustc_apfloat/tests/ppc.rs new file mode 100644 index 000000000000..145c3ddc869d --- /dev/null +++ b/src/librustc_apfloat/tests/ppc.rs @@ -0,0 +1,655 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +extern crate rustc_apfloat; + +use rustc_apfloat::{Category, Float, Round}; +use rustc_apfloat::ppc::DoubleDouble; + +use std::cmp::Ordering; + +#[test] +fn ppc_double_double() { + let test = DoubleDouble::ZERO; + let expected = "0x0p+0".parse::().unwrap(); + assert!(test.is_zero()); + assert!(!test.is_negative()); + assert!(test.bitwise_eq(expected)); + assert_eq!(0, test.to_bits()); + + let test = -DoubleDouble::ZERO; + let expected = "-0x0p+0".parse::().unwrap(); + assert!(test.is_zero()); + assert!(test.is_negative()); + assert!(test.bitwise_eq(expected)); + assert_eq!(0x8000000000000000, test.to_bits()); + + let test = "1.0".parse::().unwrap(); + assert_eq!(0x3ff0000000000000, test.to_bits()); + + // LDBL_MAX + let test = "1.79769313486231580793728971405301e+308" + .parse::() + .unwrap(); + assert_eq!(0x7c8ffffffffffffe_7fefffffffffffff, test.to_bits()); + + // LDBL_MIN + let test = "2.00416836000897277799610805135016e-292" + .parse::() + .unwrap(); + assert_eq!(0x0000000000000000_0360000000000000, test.to_bits()); +} + +#[test] +fn ppc_double_double_add_special() { + let data = [ + // (1 + 0) + (-1 + 0) = Category::Zero + ( + 0x3ff0000000000000, + 0xbff0000000000000, + Category::Zero, + Round::NearestTiesToEven, + ), + // LDBL_MAX + (1.1 >> (1023 - 106) + 0)) = Category::Infinity + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x7948000000000000, + Category::Infinity, + Round::NearestTiesToEven, + ), + // FIXME: change the 4th 0x75effffffffffffe to 0x75efffffffffffff when + // DoubleDouble's fallback is gone. + // LDBL_MAX + (1.011111... 
>> (1023 - 106) + (1.1111111...0 >> (1023 - + // 160))) = Category::Normal + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x75effffffffffffe_7947ffffffffffff, + Category::Normal, + Round::NearestTiesToEven, + ), + // LDBL_MAX + (1.1 >> (1023 - 106) + 0)) = Category::Infinity + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x7c8ffffffffffffe_7fefffffffffffff, + Category::Infinity, + Round::NearestTiesToEven, + ), + // NaN + (1 + 0) = Category::NaN + ( + 0x7ff8000000000000, + 0x3ff0000000000000, + Category::NaN, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.add_r(a2, round).value; + + assert_eq!(expected, a1.category(), "{:#x} + {:#x}", op1, op2); + } + { + let a1 = DoubleDouble::from_bits(op1); + let mut a2 = DoubleDouble::from_bits(op2); + a2 = a2.add_r(a1, round).value; + + assert_eq!(expected, a2.category(), "{:#x} + {:#x}", op2, op1); + } + } +} + +#[test] +fn ppc_double_double_add() { + let data = [ + // (1 + 0) + (1e-105 + 0) = (1 + 1e-105) + ( + 0x3ff0000000000000, + 0x3960000000000000, + 0x3960000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + 0) + (1e-106 + 0) = (1 + 1e-106) + ( + 0x3ff0000000000000, + 0x3950000000000000, + 0x3950000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + 1e-106) + (1e-106 + 0) = (1 + 1e-105) + ( + 0x3950000000000000_3ff0000000000000, + 0x3950000000000000, + 0x3960000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + 0) + (epsilon + 0) = (1 + epsilon) + ( + 0x3ff0000000000000, + 0x0000000000000001, + 0x0000000000000001_3ff0000000000000, + Round::NearestTiesToEven, + ), + // FIXME: change 0xf950000000000000 to 0xf940000000000000, when + // DoubleDouble's fallback is gone. + // (DBL_MAX - 1 << (1023 - 105)) + (1 << (1023 - 53) + 0) = DBL_MAX + + // 1.11111... 
<< (1023 - 52) + ( + 0xf950000000000000_7fefffffffffffff, + 0x7c90000000000000, + 0x7c8ffffffffffffe_7fefffffffffffff, + Round::NearestTiesToEven, + ), + // FIXME: change 0xf950000000000000 to 0xf940000000000000, when + // DoubleDouble's fallback is gone. + // (1 << (1023 - 53) + 0) + (DBL_MAX - 1 << (1023 - 105)) = DBL_MAX + + // 1.11111... << (1023 - 52) + ( + 0x7c90000000000000, + 0xf950000000000000_7fefffffffffffff, + 0x7c8ffffffffffffe_7fefffffffffffff, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.add_r(a2, round).value; + + assert_eq!(expected, a1.to_bits(), "{:#x} + {:#x}", op1, op2); + } + { + let a1 = DoubleDouble::from_bits(op1); + let mut a2 = DoubleDouble::from_bits(op2); + a2 = a2.add_r(a1, round).value; + + assert_eq!(expected, a2.to_bits(), "{:#x} + {:#x}", op2, op1); + } + } +} + +#[test] +fn ppc_double_double_subtract() { + let data = [ + // (1 + 0) - (-1e-105 + 0) = (1 + 1e-105) + ( + 0x3ff0000000000000, + 0xb960000000000000, + 0x3960000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + 0) - (-1e-106 + 0) = (1 + 1e-106) + ( + 0x3ff0000000000000, + 0xb950000000000000, + 0x3950000000000000_3ff0000000000000, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.sub_r(a2, round).value; + + assert_eq!(expected, a1.to_bits(), "{:#x} - {:#x}", op1, op2); + } +} + +#[test] +fn ppc_double_double_multiply_special() { + let data = [ + // Category::NaN * Category::NaN = Category::NaN + ( + 0x7ff8000000000000, + 0x7ff8000000000000, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::NaN * Category::Zero = Category::NaN + ( + 0x7ff8000000000000, + 0, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::NaN * Category::Infinity = Category::NaN + ( + 
0x7ff8000000000000, + 0x7ff0000000000000, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::NaN * Category::Normal = Category::NaN + ( + 0x7ff8000000000000, + 0x3ff0000000000000, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::Infinity * Category::Infinity = Category::Infinity + ( + 0x7ff0000000000000, + 0x7ff0000000000000, + Category::Infinity, + Round::NearestTiesToEven, + ), + // Category::Infinity * Category::Zero = Category::NaN + ( + 0x7ff0000000000000, + 0, + Category::NaN, + Round::NearestTiesToEven, + ), + // Category::Infinity * Category::Normal = Category::Infinity + ( + 0x7ff0000000000000, + 0x3ff0000000000000, + Category::Infinity, + Round::NearestTiesToEven, + ), + // Category::Zero * Category::Zero = Category::Zero + (0, 0, Category::Zero, Round::NearestTiesToEven), + // Category::Zero * Category::Normal = Category::Zero + ( + 0, + 0x3ff0000000000000, + Category::Zero, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.mul_r(a2, round).value; + + assert_eq!(expected, a1.category(), "{:#x} * {:#x}", op1, op2); + } + { + let a1 = DoubleDouble::from_bits(op1); + let mut a2 = DoubleDouble::from_bits(op2); + a2 = a2.mul_r(a1, round).value; + + assert_eq!(expected, a2.category(), "{:#x} * {:#x}", op2, op1); + } + } +} + +#[test] +fn ppc_double_double_multiply() { + let data = [ + // 1/3 * 3 = 1.0 + ( + 0x3c75555555555556_3fd5555555555555, + 0x4008000000000000, + 0x3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + epsilon) * (1 + 0) = Category::Zero + ( + 0x0000000000000001_3ff0000000000000, + 0x3ff0000000000000, + 0x0000000000000001_3ff0000000000000, + Round::NearestTiesToEven, + ), + // (1 + epsilon) * (1 + epsilon) = 1 + 2 * epsilon + ( + 0x0000000000000001_3ff0000000000000, + 0x0000000000000001_3ff0000000000000, + 0x0000000000000002_3ff0000000000000, + Round::NearestTiesToEven, 
+ ), + // -(1 + epsilon) * (1 + epsilon) = -1 + ( + 0x0000000000000001_bff0000000000000, + 0x0000000000000001_3ff0000000000000, + 0xbff0000000000000, + Round::NearestTiesToEven, + ), + // (0.5 + 0) * (1 + 2 * epsilon) = 0.5 + epsilon + ( + 0x3fe0000000000000, + 0x0000000000000002_3ff0000000000000, + 0x0000000000000001_3fe0000000000000, + Round::NearestTiesToEven, + ), + // (0.5 + 0) * (1 + epsilon) = 0.5 + ( + 0x3fe0000000000000, + 0x0000000000000001_3ff0000000000000, + 0x3fe0000000000000, + Round::NearestTiesToEven, + ), + // __LDBL_MAX__ * (1 + 1 << 106) = inf + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x3950000000000000_3ff0000000000000, + 0x7ff0000000000000, + Round::NearestTiesToEven, + ), + // __LDBL_MAX__ * (1 + 1 << 107) > __LDBL_MAX__, but not inf, yes =_=||| + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x3940000000000000_3ff0000000000000, + 0x7c8fffffffffffff_7fefffffffffffff, + Round::NearestTiesToEven, + ), + // __LDBL_MAX__ * (1 + 1 << 108) = __LDBL_MAX__ + ( + 0x7c8ffffffffffffe_7fefffffffffffff, + 0x3930000000000000_3ff0000000000000, + 0x7c8ffffffffffffe_7fefffffffffffff, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.mul_r(a2, round).value; + + assert_eq!(expected, a1.to_bits(), "{:#x} * {:#x}", op1, op2); + } + { + let a1 = DoubleDouble::from_bits(op1); + let mut a2 = DoubleDouble::from_bits(op2); + a2 = a2.mul_r(a1, round).value; + + assert_eq!(expected, a2.to_bits(), "{:#x} * {:#x}", op2, op1); + } + } +} + +#[test] +fn ppc_double_double_divide() { + // FIXME: Only a sanity check for now. Add more edge cases when the + // double-double algorithm is implemented. 
+ let data = [ + // 1 / 3 = 1/3 + ( + 0x3ff0000000000000, + 0x4008000000000000, + 0x3c75555555555556_3fd5555555555555, + Round::NearestTiesToEven, + ), + ]; + + for &(op1, op2, expected, round) in &data { + let mut a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + a1 = a1.div_r(a2, round).value; + + assert_eq!(expected, a1.to_bits(), "{:#x} / {:#x}", op1, op2); + } +} + +#[test] +fn ppc_double_double_remainder() { + let data = [ + // ieee_rem(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53) + ( + 0x3cb8000000000000_4008000000000000, + 0x3ca4000000000000_3ff4000000000000, + 0x3c90000000000000_3fe0000000000000, + ), + // ieee_rem(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (-0.5 - 0.5 << 53) + ( + 0x3cb8000000000000_4008000000000000, + 0x3cac000000000000_3ffc000000000000, + 0xbc90000000000000_bfe0000000000000, + ), + ]; + + for &(op1, op2, expected) in &data { + let a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + let result = a1.ieee_rem(a2).value; + + assert_eq!( + expected, + result.to_bits(), + "ieee_rem({:#x}, {:#x})", + op1, + op2 + ); + } +} + +#[test] +fn ppc_double_double_mod() { + let data = [ + // mod(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53) + ( + 0x3cb8000000000000_4008000000000000, + 0x3ca4000000000000_3ff4000000000000, + 0x3c90000000000000_3fe0000000000000, + ), + // mod(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (1.25 + 1.25 << 53) + // 0xbc98000000000000 doesn't seem right, but it's what we currently have. + // FIXME: investigate + ( + 0x3cb8000000000000_4008000000000000, + 0x3cac000000000000_3ffc000000000000, + 0xbc98000000000000_3ff4000000000001, + ), + ]; + + for &(op1, op2, expected) in &data { + let a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + let r = (a1 % a2).value; + + assert_eq!(expected, r.to_bits(), "fmod({:#x}, {:#x})", op1, op2); + } +} + +#[test] +fn ppc_double_double_fma() { + // Sanity check for now. 
+ let mut a = "2".parse::().unwrap(); + a = a.mul_add( + "3".parse::().unwrap(), + "4".parse::().unwrap(), + ).value; + assert_eq!( + Some(Ordering::Equal), + "10".parse::().unwrap().partial_cmp(&a) + ); +} + +#[test] +fn ppc_double_double_round_to_integral() { + { + let a = "1.5".parse::().unwrap(); + let a = a.round_to_integral(Round::NearestTiesToEven).value; + assert_eq!( + Some(Ordering::Equal), + "2".parse::().unwrap().partial_cmp(&a) + ); + } + { + let a = "2.5".parse::().unwrap(); + let a = a.round_to_integral(Round::NearestTiesToEven).value; + assert_eq!( + Some(Ordering::Equal), + "2".parse::().unwrap().partial_cmp(&a) + ); + } +} + +#[test] +fn ppc_double_double_compare() { + let data = [ + // (1 + 0) = (1 + 0) + ( + 0x3ff0000000000000, + 0x3ff0000000000000, + Some(Ordering::Equal), + ), + // (1 + 0) < (1.00...1 + 0) + (0x3ff0000000000000, 0x3ff0000000000001, Some(Ordering::Less)), + // (1.00...1 + 0) > (1 + 0) + ( + 0x3ff0000000000001, + 0x3ff0000000000000, + Some(Ordering::Greater), + ), + // (1 + 0) < (1 + epsilon) + ( + 0x3ff0000000000000, + 0x0000000000000001_3ff0000000000001, + Some(Ordering::Less), + ), + // NaN != NaN + (0x7ff8000000000000, 0x7ff8000000000000, None), + // (1 + 0) != NaN + (0x3ff0000000000000, 0x7ff8000000000000, None), + // Inf = Inf + ( + 0x7ff0000000000000, + 0x7ff0000000000000, + Some(Ordering::Equal), + ), + ]; + + for &(op1, op2, expected) in &data { + let a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + assert_eq!( + expected, + a1.partial_cmp(&a2), + "compare({:#x}, {:#x})", + op1, + op2, + ); + } +} + +#[test] +fn ppc_double_double_bitwise_eq() { + let data = [ + // (1 + 0) = (1 + 0) + (0x3ff0000000000000, 0x3ff0000000000000, true), + // (1 + 0) != (1.00...1 + 0) + (0x3ff0000000000000, 0x3ff0000000000001, false), + // NaN = NaN + (0x7ff8000000000000, 0x7ff8000000000000, true), + // NaN != NaN with a different bit pattern + ( + 0x7ff8000000000000, + 0x3ff0000000000000_7ff8000000000000, + false, 
+ ), + // Inf = Inf + (0x7ff0000000000000, 0x7ff0000000000000, true), + ]; + + for &(op1, op2, expected) in &data { + let a1 = DoubleDouble::from_bits(op1); + let a2 = DoubleDouble::from_bits(op2); + assert_eq!(expected, a1.bitwise_eq(a2), "{:#x} = {:#x}", op1, op2); + } +} + +#[test] +fn ppc_double_double_change_sign() { + let float = DoubleDouble::from_bits(0xbcb0000000000000_400f000000000000); + { + let actual = float.copy_sign("1".parse::().unwrap()); + assert_eq!(0xbcb0000000000000_400f000000000000, actual.to_bits()); + } + { + let actual = float.copy_sign("-1".parse::().unwrap()); + assert_eq!(0x3cb0000000000000_c00f000000000000, actual.to_bits()); + } +} + +#[test] +fn ppc_double_double_factories() { + assert_eq!(0, DoubleDouble::ZERO.to_bits()); + assert_eq!( + 0x7c8ffffffffffffe_7fefffffffffffff, + DoubleDouble::largest().to_bits() + ); + assert_eq!(0x0000000000000001, DoubleDouble::SMALLEST.to_bits()); + assert_eq!( + 0x0360000000000000, + DoubleDouble::smallest_normalized().to_bits() + ); + assert_eq!( + 0x0000000000000000_8000000000000000, + (-DoubleDouble::ZERO).to_bits() + ); + assert_eq!( + 0xfc8ffffffffffffe_ffefffffffffffff, + (-DoubleDouble::largest()).to_bits() + ); + assert_eq!( + 0x0000000000000000_8000000000000001, + (-DoubleDouble::SMALLEST).to_bits() + ); + assert_eq!( + 0x0000000000000000_8360000000000000, + (-DoubleDouble::smallest_normalized()).to_bits() + ); + assert!(DoubleDouble::SMALLEST.is_smallest()); + assert!(DoubleDouble::largest().is_largest()); +} + +#[test] +fn ppc_double_double_is_denormal() { + assert!(DoubleDouble::SMALLEST.is_denormal()); + assert!(!DoubleDouble::largest().is_denormal()); + assert!(!DoubleDouble::smallest_normalized().is_denormal()); + { + // (4 + 3) is not normalized + let data = 0x4008000000000000_4010000000000000; + assert!(DoubleDouble::from_bits(data).is_denormal()); + } +} + +#[test] +fn ppc_double_double_exact_inverse() { + assert!( + "2.0" + .parse::() + .unwrap() + .get_exact_inverse() + .unwrap() 
+ .bitwise_eq("0.5".parse::().unwrap()) + ); +} + +#[test] +fn ppc_double_double_scalbn() { + // 3.0 + 3.0 << 53 + let input = 0x3cb8000000000000_4008000000000000; + let result = DoubleDouble::from_bits(input).scalbn(1); + // 6.0 + 6.0 << 53 + assert_eq!(0x3cc8000000000000_4018000000000000, result.to_bits()); +} + +#[test] +fn ppc_double_double_frexp() { + // 3.0 + 3.0 << 53 + let input = 0x3cb8000000000000_4008000000000000; + let mut exp = 0; + // 0.75 + 0.75 << 53 + let result = DoubleDouble::from_bits(input).frexp(&mut exp); + assert_eq!(2, exp); + assert_eq!(0x3c98000000000000_3fe8000000000000, result.to_bits()); +} From 1409d205ad030743007d819374c6b3efa3570f64 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Fri, 28 Jul 2017 21:10:06 +0300 Subject: [PATCH 149/213] rustc_apfloat: complete the IEEE & PPC implementations. --- src/librustc_apfloat/ieee.rs | 2578 +++++++++++++++++++++++++++++++++- src/librustc_apfloat/lib.rs | 2 + src/librustc_apfloat/ppc.rs | 344 ++++- 3 files changed, 2834 insertions(+), 90 deletions(-) diff --git a/src/librustc_apfloat/ieee.rs b/src/librustc_apfloat/ieee.rs index aab426a5caff..3545a77c75de 100644 --- a/src/librustc_apfloat/ieee.rs +++ b/src/librustc_apfloat/ieee.rs @@ -8,19 +8,55 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use {Category, ExpInt}; -use {Float, FloatConvert, ParseError, Round, StatusAnd}; +use {Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO}; +use {Float, FloatConvert, ParseError, Round, Status, StatusAnd}; -use std::cmp::Ordering; -use std::fmt; +use std::cmp::{self, Ordering}; +use std::convert::TryFrom; +use std::fmt::{self, Write}; use std::marker::PhantomData; +use std::mem; use std::ops::Neg; #[must_use] pub struct IeeeFloat { + /// Absolute significand value (including the integer bit). + sig: [Limb; 1], + + /// The signed unbiased exponent of the value. + exp: ExpInt, + + /// What kind of floating point number this is. 
+ category: Category, + + /// Sign bit of the number. + sign: bool, + marker: PhantomData, } +/// Fundamental unit of big integer arithmetic, but also +/// large to store the largest significands by itself. +type Limb = u128; +const LIMB_BITS: usize = 128; +fn limbs_for_bits(bits: usize) -> usize { + (bits + LIMB_BITS - 1) / LIMB_BITS +} + +/// Enum that represents what fraction of the LSB truncated bits of an fp number +/// represent. +/// +/// This essentially combines the roles of guard and sticky bits. +#[must_use] +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum Loss { + // Example of truncated bits: + ExactlyZero, // 000000 + LessThanHalf, // 0xxxxx x's not all zero + ExactlyHalf, // 100000 + MoreThanHalf, // 1xxxxx x's not all zero +} + /// Represents floating point arithmetic semantics. pub trait Semantics: Sized { /// Total number of bits in the in-memory format. @@ -36,6 +72,84 @@ pub trait Semantics: Sized { /// The smallest E such that 2^E is a normalized number; this /// matches the definition of IEEE 754. const MIN_EXP: ExpInt = -Self::MAX_EXP + 1; + + /// The significand bit that marks NaN as quiet. + const QNAN_BIT: usize = Self::PRECISION - 2; + + /// The significand bitpattern to mark a NaN as quiet. + /// NOTE: for X87DoubleExtended we need to set two bits instead of 2. + const QNAN_SIGNIFICAND: Limb = 1 << Self::QNAN_BIT; + + fn from_bits(bits: u128) -> IeeeFloat { + assert!(Self::BITS > Self::PRECISION); + + let sign = bits & (1 << (Self::BITS - 1)); + let exponent = (bits & !sign) >> (Self::PRECISION - 1); + let mut r = IeeeFloat { + sig: [bits & ((1 << (Self::PRECISION - 1)) - 1)], + // Convert the exponent from its bias representation to a signed integer. + exp: (exponent as ExpInt) - Self::MAX_EXP, + category: Category::Zero, + sign: sign != 0, + marker: PhantomData, + }; + + if r.exp == Self::MIN_EXP - 1 && r.sig == [0] { + // Exponent, significand meaningless. 
+ r.category = Category::Zero; + } else if r.exp == Self::MAX_EXP + 1 && r.sig == [0] { + // Exponent, significand meaningless. + r.category = Category::Infinity; + } else if r.exp == Self::MAX_EXP + 1 && r.sig != [0] { + // Sign, exponent, significand meaningless. + r.category = Category::NaN; + } else { + r.category = Category::Normal; + if r.exp == Self::MIN_EXP - 1 { + // Denormal. + r.exp = Self::MIN_EXP; + } else { + // Set integer bit. + sig::set_bit(&mut r.sig, Self::PRECISION - 1); + } + } + + r + } + + fn to_bits(x: IeeeFloat) -> u128 { + assert!(Self::BITS > Self::PRECISION); + + // Split integer bit from significand. + let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1); + let mut significand = x.sig[0] & ((1 << (Self::PRECISION - 1)) - 1); + let exponent = match x.category { + Category::Normal => { + if x.exp == Self::MIN_EXP && !integer_bit { + // Denormal. + Self::MIN_EXP - 1 + } else { + x.exp + } + } + Category::Zero => { + // FIXME(eddyb) Maybe we should guarantee an invariant instead? + significand = 0; + Self::MIN_EXP - 1 + } + Category::Infinity => { + // FIXME(eddyb) Maybe we should guarantee an invariant instead? + significand = 0; + Self::MAX_EXP + 1 + } + Category::NaN => Self::MAX_EXP + 1, + }; + + // Convert the exponent from a signed integer to its bias representation. + let exponent = (exponent + Self::MAX_EXP) as u128; + + ((x.sign as u128) << (Self::BITS - 1)) | (exponent << (Self::PRECISION - 1)) | significand + } } impl Copy for IeeeFloat {} @@ -70,6 +184,82 @@ impl Semantics for X87DoubleExtendedS { const BITS: usize = 80; const PRECISION: usize = 64; const MAX_EXP: ExpInt = (1 << (15 - 1)) - 1; + + /// For x87 extended precision, we want to make a NaN, not a + /// pseudo-NaN. Maybe we should expose the ability to make + /// pseudo-NaNs? + const QNAN_SIGNIFICAND: Limb = 0b11 << Self::QNAN_BIT; + + /// Integer bit is explicit in this format. 
Intel hardware (387 and later) + /// does not support these bit patterns: + /// exponent = all 1's, integer bit 0, significand 0 ("pseudoinfinity") + /// exponent = all 1's, integer bit 0, significand nonzero ("pseudoNaN") + /// exponent = 0, integer bit 1 ("pseudodenormal") + /// exponent!=0 nor all 1's, integer bit 0 ("unnormal") + /// At the moment, the first two are treated as NaNs, the second two as Normal. + fn from_bits(bits: u128) -> IeeeFloat { + let sign = bits & (1 << (Self::BITS - 1)); + let exponent = (bits & !sign) >> Self::PRECISION; + let mut r = IeeeFloat { + sig: [bits & ((1 << (Self::PRECISION - 1)) - 1)], + // Convert the exponent from its bias representation to a signed integer. + exp: (exponent as ExpInt) - Self::MAX_EXP, + category: Category::Zero, + sign: sign != 0, + marker: PhantomData, + }; + + if r.exp == Self::MIN_EXP - 1 && r.sig == [0] { + // Exponent, significand meaningless. + r.category = Category::Zero; + } else if r.exp == Self::MAX_EXP + 1 && r.sig == [1 << (Self::PRECISION - 1)] { + // Exponent, significand meaningless. + r.category = Category::Infinity; + } else if r.exp == Self::MAX_EXP + 1 && r.sig != [1 << (Self::PRECISION - 1)] { + // Sign, exponent, significand meaningless. + r.category = Category::NaN; + } else { + r.category = Category::Normal; + if r.exp == Self::MIN_EXP - 1 { + // Denormal. + r.exp = Self::MIN_EXP; + } + } + + r + } + + fn to_bits(x: IeeeFloat) -> u128 { + // Get integer bit from significand. + let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1); + let mut significand = x.sig[0] & ((1 << Self::PRECISION) - 1); + let exponent = match x.category { + Category::Normal => { + if x.exp == Self::MIN_EXP && !integer_bit { + // Denormal. + Self::MIN_EXP - 1 + } else { + x.exp + } + } + Category::Zero => { + // FIXME(eddyb) Maybe we should guarantee an invariant instead? + significand = 0; + Self::MIN_EXP - 1 + } + Category::Infinity => { + // FIXME(eddyb) Maybe we should guarantee an invariant instead? 
+ significand = 1 << (Self::PRECISION - 1); + Self::MAX_EXP + 1 + } + Category::NaN => Self::MAX_EXP + 1, + }; + + // Convert the exponent from a signed integer to its bias representation. + let exponent = (exponent + Self::MAX_EXP) as u128; + + ((x.sign as u128) << (Self::BITS - 1)) | (exponent << Self::PRECISION) | significand + } } float_common_impls!(IeeeFloat); @@ -80,17 +270,40 @@ impl PartialEq for IeeeFloat { } } -#[allow(unused)] impl PartialOrd for IeeeFloat { fn partial_cmp(&self, rhs: &Self) -> Option { - panic!("NYI PartialOrd::partial_cmp"); + match (self.category, rhs.category) { + (Category::NaN, _) | + (_, Category::NaN) => None, + + (Category::Infinity, Category::Infinity) => Some((!self.sign).cmp(&(!rhs.sign))), + + (Category::Zero, Category::Zero) => Some(Ordering::Equal), + + (Category::Infinity, _) | + (Category::Normal, Category::Zero) => Some((!self.sign).cmp(&self.sign)), + + (_, Category::Infinity) | + (Category::Zero, Category::Normal) => Some(rhs.sign.cmp(&(!rhs.sign))), + + (Category::Normal, Category::Normal) => { + // Two normal numbers. Do they have the same sign? + Some((!self.sign).cmp(&(!rhs.sign)).then_with(|| { + // Compare absolute values; invert result if negative. 
+ let result = self.cmp_abs_normal(*rhs); + + if self.sign { result.reverse() } else { result } + })) + } + } } } impl Neg for IeeeFloat { type Output = Self; - fn neg(self) -> Self { - panic!("NYI Neg::neg"); + fn neg(mut self) -> Self { + self.sign = !self.sign; + self } } @@ -120,146 +333,2401 @@ impl Neg for IeeeFloat { /// 1.01E-2 5 2 0.0101 /// 1.01E-2 4 2 0.0101 /// 1.01E-2 4 1 1.01E-2 -#[allow(unused)] impl fmt::Display for IeeeFloat { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let frac_digits = f.precision().unwrap_or(0); let width = f.width().unwrap_or(3); let alternate = f.alternate(); - panic!("NYI Display::fmt"); + + match self.category { + Category::Infinity => { + if self.sign { + return f.write_str("-Inf"); + } else { + return f.write_str("+Inf"); + } + } + + Category::NaN => return f.write_str("NaN"), + + Category::Zero => { + if self.sign { + f.write_char('-')?; + } + + if width == 0 { + if alternate { + f.write_str("0.0")?; + if let Some(n) = f.precision() { + for _ in 1..n { + f.write_char('0')?; + } + } + f.write_str("e+00")?; + } else { + f.write_str("0.0E+0")?; + } + } else { + f.write_char('0')?; + } + return Ok(()); + } + + Category::Normal => {} + } + + if self.sign { + f.write_char('-')?; + } + + // We use enough digits so the number can be round-tripped back to an + // APFloat. The formula comes from "How to Print Floating-Point Numbers + // Accurately" by Steele and White. + // FIXME: Using a formula based purely on the precision is conservative; + // we can print fewer digits depending on the actual value being printed. + + // precision = 2 + floor(S::PRECISION / lg_2(10)) + let precision = f.precision().unwrap_or(2 + S::PRECISION * 59 / 196); + + // Decompose the number into an APInt and an exponent. + let mut exp = self.exp - (S::PRECISION as ExpInt - 1); + let mut sig = vec![self.sig[0]]; + + // Ignore trailing binary zeros. 
+ let trailing_zeros = sig[0].trailing_zeros(); + let _: Loss = sig::shift_right(&mut sig, &mut exp, trailing_zeros as usize); + + // Change the exponent from 2^e to 10^e. + if exp == 0 { + // Nothing to do. + } else if exp > 0 { + // Just shift left. + let shift = exp as usize; + sig.resize(limbs_for_bits(S::PRECISION + shift), 0); + sig::shift_left(&mut sig, &mut exp, shift); + } else { + // exp < 0 + let mut texp = -exp as usize; + + // We transform this using the identity: + // (N)(2^-e) == (N)(5^e)(10^-e) + + // Multiply significand by 5^e. + // N * 5^0101 == N * 5^(1*1) * 5^(0*2) * 5^(1*4) * 5^(0*8) + let mut sig_scratch = vec![]; + let mut p5 = vec![]; + let mut p5_scratch = vec![]; + while texp != 0 { + if p5.is_empty() { + p5.push(5); + } else { + p5_scratch.resize(p5.len() * 2, 0); + let _: Loss = + sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS); + while p5_scratch.last() == Some(&0) { + p5_scratch.pop(); + } + mem::swap(&mut p5, &mut p5_scratch); + } + if texp & 1 != 0 { + sig_scratch.resize(sig.len() + p5.len(), 0); + let _: Loss = sig::mul( + &mut sig_scratch, + &mut 0, + &sig, + &p5, + (sig.len() + p5.len()) * LIMB_BITS, + ); + while sig_scratch.last() == Some(&0) { + sig_scratch.pop(); + } + mem::swap(&mut sig, &mut sig_scratch); + } + texp >>= 1; + } + } + + // Fill the buffer. + let mut buffer = vec![]; + + // Ignore digits from the significand until it is no more + // precise than is required for the desired precision. + // 196/59 is a very slight overestimate of lg_2(10). + let required = (precision * 196 + 58) / 59; + let mut discard_digits = sig::omsb(&sig).saturating_sub(required) * 59 / 196; + let mut in_trail = true; + while !sig.is_empty() { + // Perform short division by 10 to extract the rightmost digit. + // rem <- sig % 10 + // sig <- sig / 10 + let mut rem = 0; + for limb in sig.iter_mut().rev() { + // We don't have an integer doubly wide than Limb, + // so we have to split the divrem on two halves. 
+ const HALF_BITS: usize = LIMB_BITS / 2; + let mut halves = [*limb & ((1 << HALF_BITS) - 1), *limb >> HALF_BITS]; + for half in halves.iter_mut().rev() { + *half |= rem << HALF_BITS; + rem = *half % 10; + *half /= 10; + } + *limb = halves[0] | (halves[1] << HALF_BITS); + } + // Reduce the sigificand to avoid wasting time dividing 0's. + while sig.last() == Some(&0) { + sig.pop(); + } + + let digit = rem; + + // Ignore digits we don't need. + if discard_digits > 0 { + discard_digits -= 1; + exp += 1; + continue; + } + + // Drop trailing zeros. + if in_trail && digit == 0 { + exp += 1; + } else { + in_trail = false; + buffer.push(b'0' + digit as u8); + } + } + + assert!(!buffer.is_empty(), "no characters in buffer!"); + + // Drop down to precision. + // FIXME: don't do more precise calculations above than are required. + if buffer.len() > precision { + // The most significant figures are the last ones in the buffer. + let mut first_sig = buffer.len() - precision; + + // Round. + // FIXME: this probably shouldn't use 'round half up'. + + // Rounding down is just a truncation, except we also want to drop + // trailing zeros from the new result. + if buffer[first_sig - 1] < b'5' { + while first_sig < buffer.len() && buffer[first_sig] == b'0' { + first_sig += 1; + } + } else { + // Rounding up requires a decimal add-with-carry. If we continue + // the carry, the newly-introduced zeros will just be truncated. + for x in &mut buffer[first_sig..] { + if *x == b'9' { + first_sig += 1; + } else { + *x += 1; + break; + } + } + } + + exp += first_sig as ExpInt; + buffer.drain(..first_sig); + + // If we carried through, we have exactly one digit of precision. + if buffer.is_empty() { + buffer.push(b'1'); + } + } + + let digits = buffer.len(); + + // Check whether we should use scientific notation. + let scientific = if width == 0 { + true + } else { + if exp >= 0 { + // 765e3 --> 765000 + // ^^^ + // But we shouldn't make the number look more precise than it is. 
+ exp as usize > width || digits + exp as usize > precision + } else { + // Power of the most significant digit. + let msd = exp + (digits - 1) as ExpInt; + if msd >= 0 { + // 765e-2 == 7.65 + false + } else { + // 765e-5 == 0.00765 + // ^ ^^ + -msd as usize > width + } + } + }; + + // Scientific formatting is pretty straightforward. + if scientific { + exp += digits as ExpInt - 1; + + f.write_char(buffer[digits - 1] as char)?; + f.write_char('.')?; + let truncate_zero = !alternate; + if digits == 1 && truncate_zero { + f.write_char('0')?; + } else { + for &d in buffer[..digits - 1].iter().rev() { + f.write_char(d as char)?; + } + } + // Fill with zeros up to precision. + if !truncate_zero && precision > digits - 1 { + for _ in 0..precision - digits + 1 { + f.write_char('0')?; + } + } + // For alternate we use lower 'e'. + f.write_char(if alternate { 'e' } else { 'E' })?; + + // Exponent always at least two digits if we do not truncate zeros. + if truncate_zero { + write!(f, "{:+}", exp)?; + } else { + write!(f, "{:+03}", exp)?; + } + + return Ok(()); + } + + // Non-scientific, positive exponents. + if exp >= 0 { + for &d in buffer.iter().rev() { + f.write_char(d as char)?; + } + for _ in 0..exp { + f.write_char('0')?; + } + return Ok(()); + } + + // Non-scientific, negative exponents. 
+ let unit_place = -exp as usize; + if unit_place < digits { + for &d in buffer[unit_place..].iter().rev() { + f.write_char(d as char)?; + } + f.write_char('.')?; + for &d in buffer[..unit_place].iter().rev() { + f.write_char(d as char)?; + } + } else { + f.write_str("0.")?; + for _ in digits..unit_place { + f.write_char('0')?; + } + for &d in buffer.iter().rev() { + f.write_char(d as char)?; + } + } + + Ok(()) } } impl fmt::Debug for IeeeFloat { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self) + write!(f, "{}({:?} | {}{:?} * 2^{})", + self, self.category, + if self.sign { "-" } else { "+" }, + self.sig, + self.exp) } } -#[allow(unused)] impl Float for IeeeFloat { const BITS: usize = S::BITS; const PRECISION: usize = S::PRECISION; const MAX_EXP: ExpInt = S::MAX_EXP; const MIN_EXP: ExpInt = S::MIN_EXP; - const ZERO: Self = IeeeFloat { marker: PhantomData }; + const ZERO: Self = IeeeFloat { + sig: [0], + exp: S::MIN_EXP - 1, + category: Category::Zero, + sign: false, + marker: PhantomData, + }; - const INFINITY: Self = IeeeFloat { marker: PhantomData }; + const INFINITY: Self = IeeeFloat { + sig: [0], + exp: S::MAX_EXP + 1, + category: Category::Infinity, + sign: false, + marker: PhantomData, + }; // FIXME(eddyb) remove when qnan becomes const fn. - const NAN: Self = IeeeFloat { marker: PhantomData }; + const NAN: Self = IeeeFloat { + sig: [S::QNAN_SIGNIFICAND], + exp: S::MAX_EXP + 1, + category: Category::NaN, + sign: false, + marker: PhantomData, + }; fn qnan(payload: Option) -> Self { - panic!("NYI qnan") + IeeeFloat { + sig: [ + S::QNAN_SIGNIFICAND | + payload.map_or(0, |payload| { + // Zero out the excess bits of the significand. + payload & ((1 << S::QNAN_BIT) - 1) + }), + ], + exp: S::MAX_EXP + 1, + category: Category::NaN, + sign: false, + marker: PhantomData, + } } fn snan(payload: Option) -> Self { - panic!("NYI snan") + let mut snan = Self::qnan(payload); + + // We always have to clear the QNaN bit to make it an SNaN. 
+ sig::clear_bit(&mut snan.sig, S::QNAN_BIT); + + // If there are no bits set in the payload, we have to set + // *something* to make it a NaN instead of an infinity; + // conventionally, this is the next bit down from the QNaN bit. + if snan.sig[0] & !S::QNAN_SIGNIFICAND == 0 { + sig::set_bit(&mut snan.sig, S::QNAN_BIT - 1); + } + + snan } fn largest() -> Self { - panic!("NYI largest") + // We want (in interchange format): + // exponent = 1..10 + // significand = 1..1 + IeeeFloat { + sig: [!0 & ((1 << S::PRECISION) - 1)], + exp: S::MAX_EXP, + category: Category::Normal, + sign: false, + marker: PhantomData, + } } - const SMALLEST: Self = IeeeFloat { marker: PhantomData }; + // We want (in interchange format): + // exponent = 0..0 + // significand = 0..01 + const SMALLEST: Self = IeeeFloat { + sig: [1], + exp: S::MIN_EXP, + category: Category::Normal, + sign: false, + marker: PhantomData, + }; fn smallest_normalized() -> Self { - panic!("NYI smallest_normalized") + // We want (in interchange format): + // exponent = 0..0 + // significand = 10..0 + IeeeFloat { + sig: [1 << (S::PRECISION - 1)], + exp: S::MIN_EXP, + category: Category::Normal, + sign: false, + marker: PhantomData, + } } - fn add_r(self, rhs: Self, round: Round) -> StatusAnd { - panic!("NYI add_r") + fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd { + let status = match (self.category, rhs.category) { + (Category::Infinity, Category::Infinity) => { + // Differently signed infinities can only be validly + // subtracted. + if self.sign != rhs.sign { + self = Self::NAN; + Status::INVALID_OP + } else { + Status::OK + } + } + + // Sign may depend on rounding mode; handled below. + (_, Category::Zero) | + (Category::NaN, _) | + (Category::Infinity, Category::Normal) => Status::OK, + + (Category::Zero, _) | + (_, Category::NaN) | + (_, Category::Infinity) => { + self = rhs; + Status::OK + } + + // This return code means it was not a simple case. 
+ (Category::Normal, Category::Normal) => { + let loss = sig::add_or_sub( + &mut self.sig, + &mut self.exp, + &mut self.sign, + &mut [rhs.sig[0]], + rhs.exp, + rhs.sign, + ); + let status; + self = unpack!(status=, self.normalize(round, loss)); + + // Can only be zero if we lost no fraction. + assert!(self.category != Category::Zero || loss == Loss::ExactlyZero); + + status + } + }; + + // If two numbers add (exactly) to zero, IEEE 754 decrees it is a + // positive zero unless rounding to minus infinity, except that + // adding two like-signed zeroes gives that zero. + if self.category == Category::Zero && + (rhs.category != Category::Zero || self.sign != rhs.sign) + { + self.sign = round == Round::TowardNegative; + } + + status.and(self) } - fn mul_r(self, rhs: Self, round: Round) -> StatusAnd { - panic!("NYI mul_r") + fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd { + self.sign ^= rhs.sign; + + match (self.category, rhs.category) { + (Category::NaN, _) => { + self.sign = false; + Status::OK.and(self) + } + + (_, Category::NaN) => { + self.sign = false; + self.category = Category::NaN; + self.sig = rhs.sig; + Status::OK.and(self) + } + + (Category::Zero, Category::Infinity) | + (Category::Infinity, Category::Zero) => Status::INVALID_OP.and(Self::NAN), + + (_, Category::Infinity) | + (Category::Infinity, _) => { + self.category = Category::Infinity; + Status::OK.and(self) + } + + (Category::Zero, _) | + (_, Category::Zero) => { + self.category = Category::Zero; + Status::OK.and(self) + } + + (Category::Normal, Category::Normal) => { + self.exp += rhs.exp; + let mut wide_sig = [0; 2]; + let loss = sig::mul( + &mut wide_sig, + &mut self.exp, + &self.sig, + &rhs.sig, + S::PRECISION, + ); + self.sig = [wide_sig[0]]; + let mut status; + self = unpack!(status=, self.normalize(round, loss)); + if loss != Loss::ExactlyZero { + status |= Status::INEXACT; + } + status.and(self) + } + } } - fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> 
StatusAnd { - panic!("NYI mul_add_r") + fn mul_add_r(mut self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd { + // If and only if all arguments are normal do we need to do an + // extended-precision calculation. + if !self.is_finite_non_zero() || !multiplicand.is_finite_non_zero() || !addend.is_finite() { + let mut status; + self = unpack!(status=, self.mul_r(multiplicand, round)); + + // FS can only be Status::OK or Status::INVALID_OP. There is no more work + // to do in the latter case. The IEEE-754R standard says it is + // implementation-defined in this case whether, if ADDEND is a + // quiet NaN, we raise invalid op; this implementation does so. + // + // If we need to do the addition we can do so with normal + // precision. + if status == Status::OK { + self = unpack!(status=, self.add_r(addend, round)); + } + return status.and(self); + } + + // Post-multiplication sign, before addition. + self.sign ^= multiplicand.sign; + + // Allocate space for twice as many bits as the original significand, plus one + // extra bit for the addition to overflow into. + assert!(limbs_for_bits(S::PRECISION * 2 + 1) <= 2); + let mut wide_sig = sig::widening_mul(self.sig[0], multiplicand.sig[0]); + + let mut loss = Loss::ExactlyZero; + let mut omsb = sig::omsb(&wide_sig); + self.exp += multiplicand.exp; + + // Assume the operands involved in the multiplication are single-precision + // FP, and the two multiplicants are: + // lhs = a23 . a22 ... a0 * 2^e1 + // rhs = b23 . b22 ... b0 * 2^e2 + // the result of multiplication is: + // lhs = c48 c47 c46 . c45 ... c0 * 2^(e1+e2) + // Note that there are three significant bits at the left-hand side of the + // radix point: two for the multiplication, and an overflow bit for the + // addition (that will always be zero at this point). Move the radix point + // toward left by two bits, and adjust exponent accordingly. 
+ self.exp += 2; + + if addend.is_non_zero() { + // Normalize our MSB to one below the top bit to allow for overflow. + let ext_precision = 2 * S::PRECISION + 1; + if omsb != ext_precision - 1 { + assert!(ext_precision > omsb); + sig::shift_left(&mut wide_sig, &mut self.exp, (ext_precision - 1) - omsb); + } + + // The intermediate result of the multiplication has "2 * S::PRECISION" + // signicant bit; adjust the addend to be consistent with mul result. + let mut ext_addend_sig = [addend.sig[0], 0]; + + // Extend the addend significand to ext_precision - 1. This guarantees + // that the high bit of the significand is zero (same as wide_sig), + // so the addition will overflow (if it does overflow at all) into the top bit. + sig::shift_left( + &mut ext_addend_sig, + &mut 0, + ext_precision - 1 - S::PRECISION, + ); + loss = sig::add_or_sub( + &mut wide_sig, + &mut self.exp, + &mut self.sign, + &mut ext_addend_sig, + addend.exp + 1, + addend.sign, + ); + + omsb = sig::omsb(&wide_sig); + } + + // Convert the result having "2 * S::PRECISION" significant-bits back to the one + // having "S::PRECISION" significant-bits. First, move the radix point from + // poision "2*S::PRECISION - 1" to "S::PRECISION - 1". The exponent need to be + // adjusted by "2*S::PRECISION - 1" - "S::PRECISION - 1" = "S::PRECISION". + self.exp -= S::PRECISION as ExpInt + 1; + + // In case MSB resides at the left-hand side of radix point, shift the + // mantissa right by some amount to make sure the MSB reside right before + // the radix point (i.e. "MSB . rest-significant-bits"). 
+ if omsb > S::PRECISION { + let bits = omsb - S::PRECISION; + loss = sig::shift_right(&mut wide_sig, &mut self.exp, bits).combine(loss); + } + + self.sig[0] = wide_sig[0]; + + let mut status; + self = unpack!(status=, self.normalize(round, loss)); + if loss != Loss::ExactlyZero { + status |= Status::INEXACT; + } + + // If two numbers add (exactly) to zero, IEEE 754 decrees it is a + // positive zero unless rounding to minus infinity, except that + // adding two like-signed zeroes gives that zero. + if self.category == Category::Zero && !status.intersects(Status::UNDERFLOW) && + self.sign != addend.sign + { + self.sign = round == Round::TowardNegative; + } + + status.and(self) } - fn div_r(self, rhs: Self, round: Round) -> StatusAnd { - panic!("NYI div_r") + fn div_r(mut self, rhs: Self, round: Round) -> StatusAnd { + self.sign ^= rhs.sign; + + match (self.category, rhs.category) { + (Category::NaN, _) => { + self.sign = false; + Status::OK.and(self) + } + + (_, Category::NaN) => { + self.category = Category::NaN; + self.sig = rhs.sig; + self.sign = false; + Status::OK.and(self) + } + + (Category::Infinity, Category::Infinity) | + (Category::Zero, Category::Zero) => Status::INVALID_OP.and(Self::NAN), + + (Category::Infinity, _) | + (Category::Zero, _) => Status::OK.and(self), + + (Category::Normal, Category::Infinity) => { + self.category = Category::Zero; + Status::OK.and(self) + } + + (Category::Normal, Category::Zero) => { + self.category = Category::Infinity; + Status::DIV_BY_ZERO.and(self) + } + + (Category::Normal, Category::Normal) => { + self.exp -= rhs.exp; + let dividend = self.sig[0]; + let loss = sig::div( + &mut self.sig, + &mut self.exp, + &mut [dividend], + &mut [rhs.sig[0]], + S::PRECISION, + ); + let mut status; + self = unpack!(status=, self.normalize(round, loss)); + if loss != Loss::ExactlyZero { + status |= Status::INEXACT; + } + status.and(self) + } + } } - fn c_fmod(self, rhs: Self) -> StatusAnd { - panic!("NYI c_fmod") + fn c_fmod(mut self, 
rhs: Self) -> StatusAnd { + match (self.category, rhs.category) { + (Category::NaN, _) | + (Category::Zero, Category::Infinity) | + (Category::Zero, Category::Normal) | + (Category::Normal, Category::Infinity) => Status::OK.and(self), + + (_, Category::NaN) => { + self.sign = false; + self.category = Category::NaN; + self.sig = rhs.sig; + Status::OK.and(self) + } + + (Category::Infinity, _) | + (_, Category::Zero) => Status::INVALID_OP.and(Self::NAN), + + (Category::Normal, Category::Normal) => { + while self.is_finite_non_zero() && rhs.is_finite_non_zero() && + self.cmp_abs_normal(rhs) != Ordering::Less + { + let mut v = rhs.scalbn(self.ilogb() - rhs.ilogb()); + if self.cmp_abs_normal(v) == Ordering::Less { + v = v.scalbn(-1); + } + v.sign = self.sign; + + let status; + self = unpack!(status=, self - v); + assert_eq!(status, Status::OK); + } + Status::OK.and(self) + } + } } fn round_to_integral(self, round: Round) -> StatusAnd { - panic!("NYI round_to_integral") + // If the exponent is large enough, we know that this value is already + // integral, and the arithmetic below would potentially cause it to saturate + // to +/-Inf. Bail out early instead. + if self.is_finite_non_zero() && self.exp + 1 >= S::PRECISION as ExpInt { + return Status::OK.and(self); + } + + // The algorithm here is quite simple: we add 2^(p-1), where p is the + // precision of our format, and then subtract it back off again. The choice + // of rounding modes for the addition/subtraction determines the rounding mode + // for our integral rounding as well. + // NOTE: When the input value is negative, we do subtraction followed by + // addition instead. 
+ assert!(S::PRECISION <= 128); + let mut status; + let magic_const = unpack!(status=, Self::from_u128(1 << (S::PRECISION - 1))); + let magic_const = magic_const.copy_sign(self); + + if status != Status::OK { + return status.and(self); + } + + let mut r = self; + r = unpack!(status=, r.add_r(magic_const, round)); + if status != Status::OK && status != Status::INEXACT { + return status.and(self); + } + + // Restore the input sign to handle 0.0/-0.0 cases correctly. + r.sub_r(magic_const, round).map(|r| r.copy_sign(self)) } - fn next_up(self) -> StatusAnd { - panic!("NYI next_up") + fn next_up(mut self) -> StatusAnd { + // Compute nextUp(x), handling each float category separately. + match self.category { + Category::Infinity => { + if self.sign { + // nextUp(-inf) = -largest + Status::OK.and(-Self::largest()) + } else { + // nextUp(+inf) = +inf + Status::OK.and(self) + } + } + Category::NaN => { + // IEEE-754R 2008 6.2 Par 2: nextUp(sNaN) = qNaN. Set Invalid flag. + // IEEE-754R 2008 6.2: nextUp(qNaN) = qNaN. Must be identity so we do not + // change the payload. + if self.is_signaling() { + // For consistency, propagate the sign of the sNaN to the qNaN. + Status::INVALID_OP.and(Self::NAN.copy_sign(self)) + } else { + Status::OK.and(self) + } + } + Category::Zero => { + // nextUp(pm 0) = +smallest + Status::OK.and(Self::SMALLEST) + } + Category::Normal => { + // nextUp(-smallest) = -0 + if self.is_smallest() && self.sign { + return Status::OK.and(-Self::ZERO); + } + + // nextUp(largest) == INFINITY + if self.is_largest() && !self.sign { + return Status::OK.and(Self::INFINITY); + } + + // Excluding the integral bit. This allows us to test for binade boundaries. + let sig_mask = (1 << (S::PRECISION - 1)) - 1; + + // nextUp(normal) == normal + inc. + if self.sign { + // If we are negative, we need to decrement the significand. + + // We only cross a binade boundary that requires adjusting the exponent + // if: + // 1. exponent != S::MIN_EXP. 
This implies we are not in the + // smallest binade or are dealing with denormals. + // 2. Our significand excluding the integral bit is all zeros. + let crossing_binade_boundary = self.exp != S::MIN_EXP && + self.sig[0] & sig_mask == 0; + + // Decrement the significand. + // + // We always do this since: + // 1. If we are dealing with a non-binade decrement, by definition we + // just decrement the significand. + // 2. If we are dealing with a normal -> normal binade decrement, since + // we have an explicit integral bit the fact that all bits but the + // integral bit are zero implies that subtracting one will yield a + // significand with 0 integral bit and 1 in all other spots. Thus we + // must just adjust the exponent and set the integral bit to 1. + // 3. If we are dealing with a normal -> denormal binade decrement, + // since we set the integral bit to 0 when we represent denormals, we + // just decrement the significand. + sig::decrement(&mut self.sig); + + if crossing_binade_boundary { + // Our result is a normal number. Do the following: + // 1. Set the integral bit to 1. + // 2. Decrement the exponent. + sig::set_bit(&mut self.sig, S::PRECISION - 1); + self.exp -= 1; + } + } else { + // If we are positive, we need to increment the significand. + + // We only cross a binade boundary that requires adjusting the exponent if + // the input is not a denormal and all of said input's significand bits + // are set. If all of said conditions are true: clear the significand, set + // the integral bit to 1, and increment the exponent. If we have a + // denormal always increment since moving denormals and the numbers in the + // smallest normal binade have the same exponent in our representation. 
+ let crossing_binade_boundary = !self.is_denormal() && + self.sig[0] & sig_mask == sig_mask; + + if crossing_binade_boundary { + self.sig = [0]; + sig::set_bit(&mut self.sig, S::PRECISION - 1); + assert_ne!( + self.exp, + S::MAX_EXP, + "We can not increment an exponent beyond the MAX_EXP \ + allowed by the given floating point semantics." + ); + self.exp += 1; + } else { + sig::increment(&mut self.sig); + } + } + Status::OK.and(self) + } + } } fn from_bits(input: u128) -> Self { - panic!("NYI from_bits") + // Dispatch to semantics. + S::from_bits(input) } fn from_u128_r(input: u128, round: Round) -> StatusAnd { - panic!("NYI from_u128_r") + IeeeFloat { + sig: [input], + exp: S::PRECISION as ExpInt - 1, + category: Category::Normal, + sign: false, + marker: PhantomData, + }.normalize(round, Loss::ExactlyZero) } - fn from_str_r(s: &str, round: Round) -> Result, ParseError> { - panic!("NYI from_str_r") + fn from_str_r(mut s: &str, mut round: Round) -> Result, ParseError> { + if s.is_empty() { + return Err(ParseError("Invalid string length")); + } + + // Handle special cases. + match s { + "inf" | "INFINITY" => return Ok(Status::OK.and(Self::INFINITY)), + "-inf" | "-INFINITY" => return Ok(Status::OK.and(-Self::INFINITY)), + "nan" | "NaN" => return Ok(Status::OK.and(Self::NAN)), + "-nan" | "-NaN" => return Ok(Status::OK.and(-Self::NAN)), + _ => {} + } + + // Handle a leading minus sign. + let minus = s.starts_with("-"); + if minus || s.starts_with("+") { + s = &s[1..]; + if s.is_empty() { + return Err(ParseError("String has no digits")); + } + } + + // Adjust the rounding mode for the absolute value below. + if minus { + round = -round; + } + + let r = if s.starts_with("0x") || s.starts_with("0X") { + s = &s[2..]; + if s.is_empty() { + return Err(ParseError("Invalid string")); + } + Self::from_hexadecimal_string(s, round)? + } else { + Self::from_decimal_string(s, round)? 
+ }; + + Ok(r.map(|r| if minus { -r } else { r })) } fn to_bits(self) -> u128 { - panic!("NYI to_bits") + // Dispatch to semantics. + S::to_bits(self) } fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd { - panic!("NYI to_u128_r"); + // The result of trying to convert a number too large. + let overflow = if self.sign { + // Negative numbers cannot be represented as unsigned. + 0 + } else { + // Largest unsigned integer of the given width. + !0 >> (128 - width) + }; + + *is_exact = false; + + match self.category { + Category::NaN => Status::INVALID_OP.and(0), + + Category::Infinity => Status::INVALID_OP.and(overflow), + + Category::Zero => { + // Negative zero can't be represented as an int. + *is_exact = !self.sign; + Status::OK.and(0) + } + + Category::Normal => { + let mut r = 0; + + // Step 1: place our absolute value, with any fraction truncated, in + // the destination. + let truncated_bits = if self.exp < 0 { + // Our absolute value is less than one; truncate everything. + // For exponent -1 the integer bit represents .5, look at that. + // For smaller exponents leftmost truncated bit is 0. + S::PRECISION - 1 + (-self.exp) as usize + } else { + // We want the most significant (exponent + 1) bits; the rest are + // truncated. + let bits = self.exp as usize + 1; + + // Hopelessly large in magnitude? + if bits > width { + return Status::INVALID_OP.and(overflow); + } + + if bits < S::PRECISION { + // We truncate (S::PRECISION - bits) bits. + r = self.sig[0] >> (S::PRECISION - bits); + S::PRECISION - bits + } else { + // We want at least as many bits as are available. + r = self.sig[0] << (bits - S::PRECISION); + 0 + } + }; + + // Step 2: work out any lost fraction, and increment the absolute + // value if we would round away from zero. 
+ let mut loss = Loss::ExactlyZero; + if truncated_bits > 0 { + loss = Loss::through_truncation(&self.sig, truncated_bits); + if loss != Loss::ExactlyZero && + self.round_away_from_zero(round, loss, truncated_bits) + { + r = r.wrapping_add(1); + if r == 0 { + return Status::INVALID_OP.and(overflow); // Overflow. + } + } + } + + // Step 3: check if we fit in the destination. + if r > overflow { + return Status::INVALID_OP.and(overflow); + } + + if loss == Loss::ExactlyZero { + *is_exact = true; + Status::OK.and(r) + } else { + Status::INEXACT.and(r) + } + } + } } fn cmp_abs_normal(self, rhs: Self) -> Ordering { - panic!("NYI cmp_abs_normal") + assert!(self.is_finite_non_zero()); + assert!(rhs.is_finite_non_zero()); + + // If exponents are equal, do an unsigned comparison of the significands. + self.exp.cmp(&rhs.exp).then_with( + || sig::cmp(&self.sig, &rhs.sig), + ) } fn bitwise_eq(self, rhs: Self) -> bool { - panic!("NYI bitwise_eq") + if self.category != rhs.category || self.sign != rhs.sign { + return false; + } + + if self.category == Category::Zero || self.category == Category::Infinity { + return true; + } + + if self.is_finite_non_zero() && self.exp != rhs.exp { + return false; + } + + self.sig == rhs.sig } fn is_negative(self) -> bool { - panic!("NYI is_negative") + self.sign } fn is_denormal(self) -> bool { - panic!("NYI is_denormal") + self.is_finite_non_zero() && self.exp == S::MIN_EXP && + !sig::get_bit(&self.sig, S::PRECISION - 1) } fn is_signaling(self) -> bool { - panic!("NYI is_signaling") + // IEEE-754R 2008 6.2.1: A signaling NaN bit string should be encoded with the + // first bit of the trailing significand being 0. + self.is_nan() && !sig::get_bit(&self.sig, S::QNAN_BIT) } fn category(self) -> Category { - panic!("NYI category") + self.category } fn get_exact_inverse(self) -> Option { - panic!("NYI get_exact_inverse") + // Special floats and denormals have no exact inverse. 
+ if !self.is_finite_non_zero() { + return None; + } + + // Check that the number is a power of two by making sure that only the + // integer bit is set in the significand. + if self.sig != [1 << (S::PRECISION - 1)] { + return None; + } + + // Get the inverse. + let mut reciprocal = Self::from_u128(1).value; + let status; + reciprocal = unpack!(status=, reciprocal / self); + if status != Status::OK { + return None; + } + + // Avoid multiplication with a denormal, it is not safe on all platforms and + // may be slower than a normal division. + if reciprocal.is_denormal() { + return None; + } + + assert!(reciprocal.is_finite_non_zero()); + assert_eq!(reciprocal.sig, [1 << (S::PRECISION - 1)]); + + Some(reciprocal) } - fn ilogb(self) -> ExpInt { - panic!("NYI ilogb") + fn ilogb(mut self) -> ExpInt { + if self.is_nan() { + return IEK_NAN; + } + if self.is_zero() { + return IEK_ZERO; + } + if self.is_infinite() { + return IEK_INF; + } + if !self.is_denormal() { + return self.exp; + } + + let sig_bits = (S::PRECISION - 1) as ExpInt; + self.exp += sig_bits; + self = self.normalize(Round::NearestTiesToEven, Loss::ExactlyZero) + .value; + self.exp - sig_bits } - fn scalbn_r(self, exp: ExpInt, round: Round) -> Self { - panic!("NYI scalbn") + fn scalbn_r(mut self, exp: ExpInt, round: Round) -> Self { + // If exp is wildly out-of-scale, simply adding it to self.exp will + // overflow; clamp it to a safe range before adding, but ensure that the range + // is large enough that the clamp does not change the result. The range we + // need to support is the difference between the largest possible exponent and + // the normalized exponent of half the smallest denormal. + + let sig_bits = (S::PRECISION - 1) as i32; + let max_change = S::MAX_EXP as i32 - (S::MIN_EXP as i32 - sig_bits) + 1; + + // Clamp to one past the range ends to let normalize handle overflow. 
+ let exp_change = cmp::min(cmp::max(exp as i32, (-max_change - 1)), max_change); + self.exp = self.exp.saturating_add(exp_change as ExpInt); + self = self.normalize(round, Loss::ExactlyZero).value; + if self.is_nan() { + sig::set_bit(&mut self.sig, S::QNAN_BIT); + } + self } - fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self { - panic!("NYI frexp") + fn frexp_r(mut self, exp: &mut ExpInt, round: Round) -> Self { + *exp = self.ilogb(); + + // Quiet signalling nans. + if *exp == IEK_NAN { + sig::set_bit(&mut self.sig, S::QNAN_BIT); + return self; + } + + if *exp == IEK_INF { + return self; + } + + // 1 is added because frexp is defined to return a normalized fraction in + // +/-[0.5, 1.0), rather than the usual +/-[1.0, 2.0). + if *exp == IEK_ZERO { + *exp = 0; + } else { + *exp += 1; + } + self.scalbn_r(-*exp, round) } } -#[allow(unused)] impl FloatConvert> for IeeeFloat { fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd> { - panic!("NYI convert_r"); + let mut r = IeeeFloat { + sig: self.sig, + exp: self.exp, + category: self.category, + sign: self.sign, + marker: PhantomData, + }; + + // x86 has some unusual NaNs which cannot be represented in any other + // format; note them here. + fn is_x87_double_extended() -> bool { + S::QNAN_SIGNIFICAND == X87DoubleExtendedS::QNAN_SIGNIFICAND + } + let x87_special_nan = is_x87_double_extended::() && !is_x87_double_extended::() && + r.category == Category::NaN && + (r.sig[0] & S::QNAN_SIGNIFICAND) != S::QNAN_SIGNIFICAND; + + // If this is a truncation of a denormal number, and the target semantics + // has larger exponent range than the source semantics (this can happen + // when truncating from PowerPC double-double to double format), the + // right shift could lose result mantissa bits. Adjust exponent instead + // of performing excessive shift. 
+ let mut shift = T::PRECISION as ExpInt - S::PRECISION as ExpInt; + if shift < 0 && r.is_finite_non_zero() { + let mut exp_change = sig::omsb(&r.sig) as ExpInt - S::PRECISION as ExpInt; + if r.exp + exp_change < T::MIN_EXP { + exp_change = T::MIN_EXP - r.exp; + } + if exp_change < shift { + exp_change = shift; + } + if exp_change < 0 { + shift -= exp_change; + r.exp += exp_change; + } + } + + // If this is a truncation, perform the shift. + let mut loss = Loss::ExactlyZero; + if shift < 0 && (r.is_finite_non_zero() || r.category == Category::NaN) { + loss = sig::shift_right(&mut r.sig, &mut 0, -shift as usize); + } + + // If this is an extension, perform the shift. + if shift > 0 && (r.is_finite_non_zero() || r.category == Category::NaN) { + sig::shift_left(&mut r.sig, &mut 0, shift as usize); + } + + let status; + if r.is_finite_non_zero() { + r = unpack!(status=, r.normalize(round, loss)); + *loses_info = status != Status::OK; + } else if r.category == Category::NaN { + *loses_info = loss != Loss::ExactlyZero || x87_special_nan; + + // For x87 extended precision, we want to make a NaN, not a special NaN if + // the input wasn't special either. + if !x87_special_nan && is_x87_double_extended::() { + sig::set_bit(&mut r.sig, T::PRECISION - 1); + } + + // gcc forces the Quiet bit on, which means (float)(double)(float_sNan) + // does not give you back the same bits. This is dubious, and we + // don't currently do it. You're really supposed to get + // an invalid operation signal at runtime, but nobody does that. + status = Status::OK; + } else { + *loses_info = false; + status = Status::OK; + } + + status.and(r) + } +} + +impl IeeeFloat { + /// Handle positive overflow. We either return infinity or + /// the largest finite number. For negative overflow, + /// negate the `round` argument before calling. + fn overflow_result(round: Round) -> StatusAnd { + match round { + // Infinity? 
+ Round::NearestTiesToEven | Round::NearestTiesToAway | Round::TowardPositive => { + (Status::OVERFLOW | Status::INEXACT).and(Self::INFINITY) + } + // Otherwise we become the largest finite number. + Round::TowardNegative | Round::TowardZero => Status::INEXACT.and(Self::largest()), + } + } + + /// Returns TRUE if, when truncating the current number, with BIT the + /// new LSB, with the given lost fraction and rounding mode, the result + /// would need to be rounded away from zero (i.e., by increasing the + /// signficand). This routine must work for Category::Zero of both signs, and + /// Category::Normal numbers. + fn round_away_from_zero(&self, round: Round, loss: Loss, bit: usize) -> bool { + // NaNs and infinities should not have lost fractions. + assert!(self.is_finite_non_zero() || self.is_zero()); + + // Current callers never pass this so we don't handle it. + assert_ne!(loss, Loss::ExactlyZero); + + match round { + Round::NearestTiesToAway => loss == Loss::ExactlyHalf || loss == Loss::MoreThanHalf, + Round::NearestTiesToEven => { + if loss == Loss::MoreThanHalf { + return true; + } + + // Our zeros don't have a significand to test. + if loss == Loss::ExactlyHalf && self.category != Category::Zero { + return sig::get_bit(&self.sig, bit); + } + + false + } + Round::TowardZero => false, + Round::TowardPositive => !self.sign, + Round::TowardNegative => self.sign, + } + } + + fn normalize(mut self, round: Round, mut loss: Loss) -> StatusAnd { + if !self.is_finite_non_zero() { + return Status::OK.and(self); + } + + // Before rounding normalize the exponent of Category::Normal numbers. + let mut omsb = sig::omsb(&self.sig); + + if omsb > 0 { + // OMSB is numbered from 1. We want to place it in the integer + // bit numbered PRECISION if possible, with a compensating change in + // the exponent. 
+ let mut final_exp = self.exp.saturating_add( + omsb as ExpInt - S::PRECISION as ExpInt, + ); + + // If the resulting exponent is too high, overflow according to + // the rounding mode. + if final_exp > S::MAX_EXP { + let round = if self.sign { -round } else { round }; + return Self::overflow_result(round).map(|r| r.copy_sign(self)); + } + + // Subnormal numbers have exponent MIN_EXP, and their MSB + // is forced based on that. + if final_exp < S::MIN_EXP { + final_exp = S::MIN_EXP; + } + + // Shifting left is easy as we don't lose precision. + if final_exp < self.exp { + assert_eq!(loss, Loss::ExactlyZero); + + let exp_change = (self.exp - final_exp) as usize; + sig::shift_left(&mut self.sig, &mut self.exp, exp_change); + + return Status::OK.and(self); + } + + // Shift right and capture any new lost fraction. + if final_exp > self.exp { + let exp_change = (final_exp - self.exp) as usize; + loss = sig::shift_right(&mut self.sig, &mut self.exp, exp_change).combine(loss); + + // Keep OMSB up-to-date. + omsb = omsb.saturating_sub(exp_change); + } + } + + // Now round the number according to round given the lost + // fraction. + + // As specified in IEEE 754, since we do not trap we do not report + // underflow for exact results. + if loss == Loss::ExactlyZero { + // Canonicalize zeros. + if omsb == 0 { + self.category = Category::Zero; + } + + return Status::OK.and(self); + } + + // Increment the significand if we're rounding away from zero. + if self.round_away_from_zero(round, loss, 0) { + if omsb == 0 { + self.exp = S::MIN_EXP; + } + + // We should never overflow. + assert_eq!(sig::increment(&mut self.sig), 0); + omsb = sig::omsb(&self.sig); + + // Did the significand increment overflow? + if omsb == S::PRECISION + 1 { + // Renormalize by incrementing the exponent and shifting our + // significand right one. However if we already have the + // maximum exponent we overflow to infinity. 
+ if self.exp == S::MAX_EXP { + self.category = Category::Infinity; + + return (Status::OVERFLOW | Status::INEXACT).and(self); + } + + let _: Loss = sig::shift_right(&mut self.sig, &mut self.exp, 1); + + return Status::INEXACT.and(self); + } + } + + // The normal case - we were and are not denormal, and any + // significand increment above didn't overflow. + if omsb == S::PRECISION { + return Status::INEXACT.and(self); + } + + // We have a non-zero denormal. + assert!(omsb < S::PRECISION); + + // Canonicalize zeros. + if omsb == 0 { + self.category = Category::Zero; + } + + // The Category::Zero case is a denormal that underflowed to zero. + (Status::UNDERFLOW | Status::INEXACT).and(self) + } + + fn from_hexadecimal_string(s: &str, round: Round) -> Result, ParseError> { + let mut r = IeeeFloat { + sig: [0], + exp: 0, + category: Category::Normal, + sign: false, + marker: PhantomData, + }; + + let mut any_digits = false; + let mut has_exp = false; + let mut bit_pos = LIMB_BITS as isize; + let mut loss = None; + + // Without leading or trailing zeros, irrespective of the dot. + let mut first_sig_digit = None; + let mut dot = s.len(); + + for (p, c) in s.char_indices() { + // Skip leading zeros and any (hexa)decimal point. + if c == '.' { + if dot != s.len() { + return Err(ParseError("String contains multiple dots")); + } + dot = p; + } else if let Some(hex_value) = c.to_digit(16) { + any_digits = true; + + if first_sig_digit.is_none() { + if hex_value == 0 { + continue; + } + first_sig_digit = Some(p); + } + + // Store the number while we have space. + bit_pos -= 4; + if bit_pos >= 0 { + r.sig[0] |= (hex_value as Limb) << bit_pos; + } else { + // If zero or one-half (the hexadecimal digit 8) are followed + // by non-zero, they're a little more than zero or one-half. 
+ if let Some(ref mut loss) = loss { + if hex_value != 0 { + if *loss == Loss::ExactlyZero { + *loss = Loss::LessThanHalf; + } + if *loss == Loss::ExactlyHalf { + *loss = Loss::MoreThanHalf; + } + } + } else { + loss = Some(match hex_value { + 0 => Loss::ExactlyZero, + 1...7 => Loss::LessThanHalf, + 8 => Loss::ExactlyHalf, + 9...15 => Loss::MoreThanHalf, + _ => unreachable!(), + }); + } + } + } else if c == 'p' || c == 'P' { + if !any_digits { + return Err(ParseError("Significand has no digits")); + } + + if dot == s.len() { + dot = p; + } + + let mut chars = s[p + 1..].chars().peekable(); + + // Adjust for the given exponent. + let exp_minus = chars.peek() == Some(&'-'); + if exp_minus || chars.peek() == Some(&'+') { + chars.next(); + } + + for c in chars { + if let Some(value) = c.to_digit(10) { + has_exp = true; + r.exp = r.exp.saturating_mul(10).saturating_add(value as ExpInt); + } else { + return Err(ParseError("Invalid character in exponent")); + } + } + if !has_exp { + return Err(ParseError("Exponent has no digits")); + } + + if exp_minus { + r.exp = -r.exp; + } + + break; + } else { + return Err(ParseError("Invalid character in significand")); + } + } + if !any_digits { + return Err(ParseError("Significand has no digits")); + } + + // Hex floats require an exponent but not a hexadecimal point. + if !has_exp { + return Err(ParseError("Hex strings require an exponent")); + } + + // Ignore the exponent if we are zero. + let first_sig_digit = match first_sig_digit { + Some(p) => p, + None => return Ok(Status::OK.and(Self::ZERO)), + }; + + // Calculate the exponent adjustment implicit in the number of + // significant digits and adjust for writing the significand starting + // at the most significant nibble. 
+ let exp_adjustment = if dot > first_sig_digit { + ExpInt::try_from(dot - first_sig_digit).unwrap() + } else { + -ExpInt::try_from(first_sig_digit - dot - 1).unwrap() + }; + let exp_adjustment = exp_adjustment + .saturating_mul(4) + .saturating_sub(1) + .saturating_add(S::PRECISION as ExpInt) + .saturating_sub(LIMB_BITS as ExpInt); + r.exp = r.exp.saturating_add(exp_adjustment); + + Ok(r.normalize(round, loss.unwrap_or(Loss::ExactlyZero))) + } + + fn from_decimal_string(s: &str, round: Round) -> Result, ParseError> { + // Given a normal decimal floating point number of the form + // + // dddd.dddd[eE][+-]ddd + // + // where the decimal point and exponent are optional, fill out the + // variables below. Exponent is appropriate if the significand is + // treated as an integer, and normalized_exp if the significand + // is taken to have the decimal point after a single leading + // non-zero digit. + // + // If the value is zero, first_sig_digit is None. + + let mut any_digits = false; + let mut dec_exp = 0i32; + + // Without leading or trailing zeros, irrespective of the dot. + let mut first_sig_digit = None; + let mut last_sig_digit = 0; + let mut dot = s.len(); + + for (p, c) in s.char_indices() { + if c == '.' { + if dot != s.len() { + return Err(ParseError("String contains multiple dots")); + } + dot = p; + } else if let Some(dec_value) = c.to_digit(10) { + any_digits = true; + + if dec_value != 0 { + if first_sig_digit.is_none() { + first_sig_digit = Some(p); + } + last_sig_digit = p; + } + } else if c == 'e' || c == 'E' { + if !any_digits { + return Err(ParseError("Significand has no digits")); + } + + if dot == s.len() { + dot = p; + } + + let mut chars = s[p + 1..].chars().peekable(); + + // Adjust for the given exponent. 
+ let exp_minus = chars.peek() == Some(&'-'); + if exp_minus || chars.peek() == Some(&'+') { + chars.next(); + } + + any_digits = false; + for c in chars { + if let Some(value) = c.to_digit(10) { + any_digits = true; + dec_exp = dec_exp.saturating_mul(10).saturating_add(value as i32); + } else { + return Err(ParseError("Invalid character in exponent")); + } + } + if !any_digits { + return Err(ParseError("Exponent has no digits")); + } + + if exp_minus { + dec_exp = -dec_exp; + } + + break; + } else { + return Err(ParseError("Invalid character in significand")); + } + } + if !any_digits { + return Err(ParseError("Significand has no digits")); + } + + // Test if we have a zero number allowing for non-zero exponents. + let first_sig_digit = match first_sig_digit { + Some(p) => p, + None => return Ok(Status::OK.and(Self::ZERO)), + }; + + // Adjust the exponents for any decimal point. + if dot > last_sig_digit { + dec_exp = dec_exp.saturating_add((dot - last_sig_digit - 1) as i32); + } else { + dec_exp = dec_exp.saturating_sub((last_sig_digit - dot) as i32); + } + let significand_digits = last_sig_digit - first_sig_digit + 1 - + (dot > first_sig_digit && dot < last_sig_digit) as usize; + let normalized_exp = dec_exp.saturating_add(significand_digits as i32 - 1); + + // Handle the cases where exponents are obviously too large or too + // small. Writing L for log 10 / log 2, a number d.ddddd*10^dec_exp + // definitely overflows if + // + // (dec_exp - 1) * L >= MAX_EXP + // + // and definitely underflows to zero where + // + // (dec_exp + 1) * L <= MIN_EXP - PRECISION + // + // With integer arithmetic the tightest bounds for L are + // + // 93/28 < L < 196/59 [ numerator <= 256 ] + // 42039/12655 < L < 28738/8651 [ numerator <= 65536 ] + + // Check for MAX_EXP. + if normalized_exp.saturating_sub(1).saturating_mul(42039) >= 12655 * S::MAX_EXP as i32 { + // Overflow and round. + return Ok(Self::overflow_result(round)); + } + + // Check for MIN_EXP. 
+ if normalized_exp.saturating_add(1).saturating_mul(28738) <= + 8651 * (S::MIN_EXP as i32 - S::PRECISION as i32) + { + // Underflow to zero and round. + let r = if round == Round::TowardPositive { + IeeeFloat::SMALLEST + } else { + IeeeFloat::ZERO + }; + return Ok((Status::UNDERFLOW | Status::INEXACT).and(r)); + } + + // A tight upper bound on number of bits required to hold an + // N-digit decimal integer is N * 196 / 59. Allocate enough space + // to hold the full significand, and an extra limb required by + // tcMultiplyPart. + let max_limbs = limbs_for_bits(1 + 196 * significand_digits / 59); + let mut dec_sig = Vec::with_capacity(max_limbs); + + // Convert to binary efficiently - we do almost all multiplication + // in a Limb. When this would overflow do we do a single + // bignum multiplication, and then revert again to multiplication + // in a Limb. + let mut chars = s[first_sig_digit..last_sig_digit + 1].chars(); + loop { + let mut val = 0; + let mut multiplier = 1; + + loop { + let dec_value = match chars.next() { + Some('.') => continue, + Some(c) => c.to_digit(10).unwrap(), + None => break, + }; + + multiplier *= 10; + val = val * 10 + dec_value as Limb; + + // The maximum number that can be multiplied by ten with any + // digit added without overflowing a Limb. + if multiplier > (!0 - 9) / 10 { + break; + } + } + + // If we've consumed no digits, we're done. + if multiplier == 1 { + break; + } + + // Multiply out the current limb. + let mut carry = val; + for x in &mut dec_sig { + let [low, mut high] = sig::widening_mul(*x, multiplier); + + // Now add carry. + let (low, overflow) = low.overflowing_add(carry); + high += overflow as Limb; + + *x = low; + carry = high; + } + + // If we had carry, we need another limb (likely but not guaranteed). + if carry > 0 { + dec_sig.push(carry); + } + } + + // Calculate pow(5, abs(dec_exp)) into `pow5_full`. + // The *_calc Vec's are reused scratch space, as an optimization. 
+ let (pow5_full, mut pow5_calc, mut sig_calc, mut sig_scratch_calc) = { + let mut power = dec_exp.abs() as usize; + + const FIRST_EIGHT_POWERS: [Limb; 8] = [1, 5, 25, 125, 625, 3125, 15625, 78125]; + + let mut p5_scratch = vec![]; + let mut p5 = vec![FIRST_EIGHT_POWERS[4]]; + + let mut r_scratch = vec![]; + let mut r = vec![FIRST_EIGHT_POWERS[power & 7]]; + power >>= 3; + + while power > 0 { + // Calculate pow(5,pow(2,n+3)). + p5_scratch.resize(p5.len() * 2, 0); + let _: Loss = sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS); + while p5_scratch.last() == Some(&0) { + p5_scratch.pop(); + } + mem::swap(&mut p5, &mut p5_scratch); + + if power & 1 != 0 { + r_scratch.resize(r.len() + p5.len(), 0); + let _: Loss = sig::mul( + &mut r_scratch, + &mut 0, + &r, + &p5, + (r.len() + p5.len()) * LIMB_BITS, + ); + while r_scratch.last() == Some(&0) { + r_scratch.pop(); + } + mem::swap(&mut r, &mut r_scratch); + } + + power >>= 1; + } + + (r, r_scratch, p5, p5_scratch) + }; + + // Attempt dec_sig * 10^dec_exp with increasing precision. + let mut attempt = 1; + loop { + let calc_precision = (LIMB_BITS << attempt) - 1; + attempt += 1; + + let calc_normal_from_limbs = |sig: &mut Vec, + limbs: &[Limb]| + -> StatusAnd { + sig.resize(limbs_for_bits(calc_precision), 0); + let (mut loss, mut exp) = sig::from_limbs(sig, limbs, calc_precision); + + // Before rounding normalize the exponent of Category::Normal numbers. + let mut omsb = sig::omsb(sig); + + assert_ne!(omsb, 0); + + // OMSB is numbered from 1. We want to place it in the integer + // bit numbered PRECISION if possible, with a compensating change in + // the exponent. + let final_exp = exp.saturating_add(omsb as ExpInt - calc_precision as ExpInt); + + // Shifting left is easy as we don't lose precision. 
+ if final_exp < exp { + assert_eq!(loss, Loss::ExactlyZero); + + let exp_change = (exp - final_exp) as usize; + sig::shift_left(sig, &mut exp, exp_change); + + return Status::OK.and(exp); + } + + // Shift right and capture any new lost fraction. + if final_exp > exp { + let exp_change = (final_exp - exp) as usize; + loss = sig::shift_right(sig, &mut exp, exp_change).combine(loss); + + // Keep OMSB up-to-date. + omsb = omsb.saturating_sub(exp_change); + } + + assert_eq!(omsb, calc_precision); + + // Now round the number according to round given the lost + // fraction. + + // As specified in IEEE 754, since we do not trap we do not report + // underflow for exact results. + if loss == Loss::ExactlyZero { + return Status::OK.and(exp); + } + + // Increment the significand if we're rounding away from zero. + if loss == Loss::MoreThanHalf || loss == Loss::ExactlyHalf && sig::get_bit(sig, 0) { + // We should never overflow. + assert_eq!(sig::increment(sig), 0); + omsb = sig::omsb(sig); + + // Did the significand increment overflow? + if omsb == calc_precision + 1 { + let _: Loss = sig::shift_right(sig, &mut exp, 1); + + return Status::INEXACT.and(exp); + } + } + + // The normal case - we were and are not denormal, and any + // significand increment above didn't overflow. + Status::INEXACT.and(exp) + }; + + let status; + let mut exp = unpack!(status=, + calc_normal_from_limbs(&mut sig_calc, &dec_sig)); + let pow5_status; + let pow5_exp = unpack!(pow5_status=, + calc_normal_from_limbs(&mut pow5_calc, &pow5_full)); + + // Add dec_exp, as 10^n = 5^n * 2^n. 
+ exp += dec_exp as ExpInt; + + let mut used_bits = S::PRECISION; + let mut truncated_bits = calc_precision - used_bits; + + let half_ulp_err1 = (status != Status::OK) as Limb; + let (calc_loss, half_ulp_err2); + if dec_exp >= 0 { + exp += pow5_exp; + + sig_scratch_calc.resize(sig_calc.len() + pow5_calc.len(), 0); + calc_loss = sig::mul( + &mut sig_scratch_calc, + &mut exp, + &sig_calc, + &pow5_calc, + calc_precision, + ); + mem::swap(&mut sig_calc, &mut sig_scratch_calc); + + half_ulp_err2 = (pow5_status != Status::OK) as Limb; + } else { + exp -= pow5_exp; + + sig_scratch_calc.resize(sig_calc.len(), 0); + calc_loss = sig::div( + &mut sig_scratch_calc, + &mut exp, + &mut sig_calc, + &mut pow5_calc, + calc_precision, + ); + mem::swap(&mut sig_calc, &mut sig_scratch_calc); + + // Denormal numbers have less precision. + if exp < S::MIN_EXP { + truncated_bits += (S::MIN_EXP - exp) as usize; + used_bits = calc_precision.saturating_sub(truncated_bits); + } + // Extra half-ulp lost in reciprocal of exponent. + half_ulp_err2 = 2 * + (pow5_status != Status::OK || calc_loss != Loss::ExactlyZero) as Limb; + } + + // Both sig::mul and sig::div return the + // result with the integer bit set. + assert!(sig::get_bit(&sig_calc, calc_precision - 1)); + + // The error from the true value, in half-ulps, on multiplying two + // floating point numbers, which differ from the value they + // approximate by at most half_ulp_err1 and half_ulp_err2 half-ulps, is strictly less + // than the returned value. + // + // See "How to Read Floating Point Numbers Accurately" by William D Clinger. + assert!( + half_ulp_err1 < 2 || half_ulp_err2 < 2 || (half_ulp_err1 + half_ulp_err2 < 8) + ); + + let inexact = (calc_loss != Loss::ExactlyZero) as Limb; + let half_ulp_err = if half_ulp_err1 + half_ulp_err2 == 0 { + inexact * 2 // <= inexact half-ulps. 
+ } else { + inexact + 2 * (half_ulp_err1 + half_ulp_err2) + }; + + let ulps_from_boundary = { + let bits = calc_precision - used_bits - 1; + + let i = bits / LIMB_BITS; + let limb = sig_calc[i] & (!0 >> (LIMB_BITS - 1 - bits % LIMB_BITS)); + let boundary = match round { + Round::NearestTiesToEven | Round::NearestTiesToAway => 1 << (bits % LIMB_BITS), + _ => 0, + }; + if i == 0 { + let delta = limb.wrapping_sub(boundary); + cmp::min(delta, delta.wrapping_neg()) + } else if limb == boundary { + if !sig::is_all_zeros(&sig_calc[1..i]) { + !0 // A lot. + } else { + sig_calc[0] + } + } else if limb == boundary.wrapping_sub(1) { + if sig_calc[1..i].iter().any(|&x| x.wrapping_neg() != 1) { + !0 // A lot. + } else { + sig_calc[0].wrapping_neg() + } + } else { + !0 // A lot. + } + }; + + // Are we guaranteed to round correctly if we truncate? + if ulps_from_boundary.saturating_mul(2) >= half_ulp_err { + let mut r = IeeeFloat { + sig: [0], + exp, + category: Category::Normal, + sign: false, + marker: PhantomData, + }; + sig::extract(&mut r.sig, &sig_calc, used_bits, calc_precision - used_bits); + // If we extracted less bits above we must adjust our exponent + // to compensate for the implicit right shift. + r.exp += (S::PRECISION - used_bits) as ExpInt; + let loss = Loss::through_truncation(&sig_calc, truncated_bits); + return Ok(r.normalize(round, loss)); + } + } + } +} + +impl Loss { + /// Combine the effect of two lost fractions. + fn combine(self, less_significant: Loss) -> Loss { + let mut more_significant = self; + if less_significant != Loss::ExactlyZero { + if more_significant == Loss::ExactlyZero { + more_significant = Loss::LessThanHalf; + } else if more_significant == Loss::ExactlyHalf { + more_significant = Loss::MoreThanHalf; + } + } + + more_significant + } + + /// Return the fraction lost were a bignum truncated losing the least + /// significant `bits` bits. 
+ fn through_truncation(limbs: &[Limb], bits: usize) -> Loss { + if bits == 0 { + return Loss::ExactlyZero; + } + + let half_bit = bits - 1; + let half_limb = half_bit / LIMB_BITS; + let (half_limb, rest) = if half_limb < limbs.len() { + (limbs[half_limb], &limbs[..half_limb]) + } else { + (0, limbs) + }; + let half = 1 << (half_bit % LIMB_BITS); + let has_half = half_limb & half != 0; + let has_rest = half_limb & (half - 1) != 0 || !sig::is_all_zeros(rest); + + match (has_half, has_rest) { + (false, false) => Loss::ExactlyZero, + (false, true) => Loss::LessThanHalf, + (true, false) => Loss::ExactlyHalf, + (true, true) => Loss::MoreThanHalf, + } + } +} + +/// Implementation details of IeeeFloat significands, such as big integer arithmetic. +/// As a rule of thumb, no functions in this module should dynamically allocate. +mod sig { + use std::cmp::Ordering; + use std::mem; + use super::{ExpInt, Limb, LIMB_BITS, limbs_for_bits, Loss}; + + pub(super) fn is_all_zeros(limbs: &[Limb]) -> bool { + limbs.iter().all(|&l| l == 0) + } + + /// One, not zero, based MSB. That is, returns 0 for a zeroed significand. + pub(super) fn omsb(limbs: &[Limb]) -> usize { + for i in (0..limbs.len()).rev() { + if limbs[i] != 0 { + return (i + 1) * LIMB_BITS - limbs[i].leading_zeros() as usize; + } + } + + 0 + } + + /// Comparison (unsigned) of two significands. + pub(super) fn cmp(a: &[Limb], b: &[Limb]) -> Ordering { + assert_eq!(a.len(), b.len()); + for (a, b) in a.iter().zip(b).rev() { + match a.cmp(b) { + Ordering::Equal => {} + o => return o, + } + } + + Ordering::Equal + } + + /// Extract the given bit. + pub(super) fn get_bit(limbs: &[Limb], bit: usize) -> bool { + limbs[bit / LIMB_BITS] & (1 << (bit % LIMB_BITS)) != 0 + } + + /// Set the given bit. + pub(super) fn set_bit(limbs: &mut [Limb], bit: usize) { + limbs[bit / LIMB_BITS] |= 1 << (bit % LIMB_BITS); + } + + /// Clear the given bit. 
+ pub(super) fn clear_bit(limbs: &mut [Limb], bit: usize) {
+ limbs[bit / LIMB_BITS] &= !(1 << (bit % LIMB_BITS));
+ }
+
+ /// Shift `dst` left `bits` bits, subtract `bits` from its exponent.
+ pub(super) fn shift_left(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) {
+ if bits > 0 {
+ // Our exponent should not underflow.
+ *exp = exp.checked_sub(bits as ExpInt).unwrap();
+
+ // Jump is the inter-limb jump; shift is the intra-limb shift.
+ let jump = bits / LIMB_BITS;
+ let shift = bits % LIMB_BITS;
+
+ for i in (0..dst.len()).rev() {
+ let mut limb;
+
+ if i < jump {
+ limb = 0;
+ } else {
+ // dst[i] comes from the two limbs src[i - jump] and, if we have
+ // an intra-limb shift, src[i - jump - 1].
+ limb = dst[i - jump];
+ if shift > 0 {
+ limb <<= shift;
+ if i >= jump + 1 {
+ limb |= dst[i - jump - 1] >> (LIMB_BITS - shift);
+ }
+ }
+ }
+
+ dst[i] = limb;
+ }
+ }
+ }
+
+ /// Shift `dst` right `bits` bits noting lost fraction.
+ pub(super) fn shift_right(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) -> Loss {
+ let loss = Loss::through_truncation(dst, bits);
+
+ if bits > 0 {
+ // Our exponent should not overflow.
+ *exp = exp.checked_add(bits as ExpInt).unwrap();
+
+ // Jump is the inter-limb jump; shift is the intra-limb shift.
+ let jump = bits / LIMB_BITS;
+ let shift = bits % LIMB_BITS;
+
+ // Perform the shift. This leaves the most significant `bits` bits
+ // of the result at zero.
+ for i in 0..dst.len() {
+ let mut limb;
+
+ if i + jump >= dst.len() {
+ limb = 0;
+ } else {
+ limb = dst[i + jump];
+ if shift > 0 {
+ limb >>= shift;
+ if i + jump + 1 < dst.len() {
+ limb |= dst[i + jump + 1] << (LIMB_BITS - shift);
+ }
+ }
+ }
+
+ dst[i] = limb;
+ }
+ }
+
+ loss
+ }
+
+ /// Copy the bit vector of width `src_bits` from `src`, starting at bit SRC_LSB,
+ /// to `dst`, such that the bit SRC_LSB becomes the least significant bit of `dst`.
+ /// All high bits above `src_bits` in `dst` are zero-filled.
+ pub(super) fn extract(dst: &mut [Limb], src: &[Limb], src_bits: usize, src_lsb: usize) {
+ if src_bits == 0 {
+ return;
+ }
+
+ let dst_limbs = limbs_for_bits(src_bits);
+ assert!(dst_limbs <= dst.len());
+
+ let src = &src[src_lsb / LIMB_BITS..];
+ dst[..dst_limbs].copy_from_slice(&src[..dst_limbs]);
+
+ let shift = src_lsb % LIMB_BITS;
+ let _: Loss = shift_right(&mut dst[..dst_limbs], &mut 0, shift);
+
+ // We now have (dst_limbs * LIMB_BITS - shift) bits from `src`
+ // in `dst`. If this is less than src_bits, append the rest, else
+ // clear the high bits.
+ let n = dst_limbs * LIMB_BITS - shift;
+ if n < src_bits {
+ let mask = (1 << (src_bits - n)) - 1;
+ dst[dst_limbs - 1] |= (src[dst_limbs] & mask) << n % LIMB_BITS;
+ } else if n > src_bits && src_bits % LIMB_BITS > 0 {
+ dst[dst_limbs - 1] &= (1 << (src_bits % LIMB_BITS)) - 1;
+ }
+
+ // Clear high limbs.
+ for x in &mut dst[dst_limbs..] {
+ *x = 0;
+ }
+ }
+
+ /// We want the most significant PRECISION bits of `src`. There may not
+ /// be that many; extract what we can.
+ pub(super) fn from_limbs(dst: &mut [Limb], src: &[Limb], precision: usize) -> (Loss, ExpInt) {
+ let omsb = omsb(src);
+
+ if precision <= omsb {
+ extract(dst, src, precision, omsb - precision);
+ (
+ Loss::through_truncation(src, omsb - precision),
+ omsb as ExpInt - 1,
+ )
+ } else {
+ extract(dst, src, omsb, 0);
+ (Loss::ExactlyZero, precision as ExpInt - 1)
+ }
+ }
+
+ /// Increment in-place, return the carry flag.
+ pub(super) fn increment(dst: &mut [Limb]) -> Limb {
+ for x in dst {
+ *x = x.wrapping_add(1);
+ if *x != 0 {
+ return 0;
+ }
+ }
+
+ 1
+ }
+
+ /// Decrement in-place, return the borrow flag.
+ pub(super) fn decrement(dst: &mut [Limb]) -> Limb {
+ for x in dst {
+ *x = x.wrapping_sub(1);
+ if *x != !0 {
+ return 0;
+ }
+ }
+
+ 1
+ }
+
+ /// `a += b + c` where `c` is zero or one. Returns the carry flag.
+ pub(super) fn add(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb { + assert!(c <= 1); + + for (a, &b) in a.iter_mut().zip(b) { + let (r, overflow) = a.overflowing_add(b); + let (r, overflow2) = r.overflowing_add(c); + *a = r; + c = (overflow | overflow2) as Limb; + } + + c + } + + /// `a -= b + c` where `c` is zero or one. Returns the borrow flag. + pub(super) fn sub(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb { + assert!(c <= 1); + + for (a, &b) in a.iter_mut().zip(b) { + let (r, overflow) = a.overflowing_sub(b); + let (r, overflow2) = r.overflowing_sub(c); + *a = r; + c = (overflow | overflow2) as Limb; + } + + c + } + + /// `a += b` or `a -= b`. Does not preserve `b`. + pub(super) fn add_or_sub( + a_sig: &mut [Limb], + a_exp: &mut ExpInt, + a_sign: &mut bool, + b_sig: &mut [Limb], + b_exp: ExpInt, + b_sign: bool, + ) -> Loss { + // Are we bigger exponent-wise than the RHS? + let bits = *a_exp - b_exp; + + // Determine if the operation on the absolute values is effectively + // an addition or subtraction. + // Subtraction is more subtle than one might naively expect. + if *a_sign ^ b_sign { + let (reverse, loss); + + if bits == 0 { + reverse = cmp(a_sig, b_sig) == Ordering::Less; + loss = Loss::ExactlyZero; + } else if bits > 0 { + loss = shift_right(b_sig, &mut 0, (bits - 1) as usize); + shift_left(a_sig, a_exp, 1); + reverse = false; + } else { + loss = shift_right(a_sig, a_exp, (-bits - 1) as usize); + shift_left(b_sig, &mut 0, 1); + reverse = true; + } + + let borrow = (loss != Loss::ExactlyZero) as Limb; + if reverse { + // The code above is intended to ensure that no borrow is necessary. + assert_eq!(sub(b_sig, a_sig, borrow), 0); + a_sig.copy_from_slice(b_sig); + *a_sign = !*a_sign; + } else { + // The code above is intended to ensure that no borrow is necessary. + assert_eq!(sub(a_sig, b_sig, borrow), 0); + } + + // Invert the lost fraction - it was on the RHS and subtracted. 
+ match loss { + Loss::LessThanHalf => Loss::MoreThanHalf, + Loss::MoreThanHalf => Loss::LessThanHalf, + _ => loss, + } + } else { + let loss = if bits > 0 { + shift_right(b_sig, &mut 0, bits as usize) + } else { + shift_right(a_sig, a_exp, -bits as usize) + }; + // We have a guard bit; generating a carry cannot happen. + assert_eq!(add(a_sig, b_sig, 0), 0); + loss + } + } + + /// `[low, high] = a * b`. + /// + /// This cannot overflow, because + /// + /// `(n - 1) * (n - 1) + 2 * (n - 1) == (n - 1) * (n + 1)` + /// + /// which is less than n^2. + pub(super) fn widening_mul(a: Limb, b: Limb) -> [Limb; 2] { + let mut wide = [0, 0]; + + if a == 0 || b == 0 { + return wide; + } + + const HALF_BITS: usize = LIMB_BITS / 2; + + let select = |limb, i| (limb >> (i * HALF_BITS)) & ((1 << HALF_BITS) - 1); + for i in 0..2 { + for j in 0..2 { + let mut x = [select(a, i) * select(b, j), 0]; + shift_left(&mut x, &mut 0, (i + j) * HALF_BITS); + assert_eq!(add(&mut wide, &x, 0), 0); + } + } + + wide + } + + /// `dst = a * b` (for normal `a` and `b`). Returns the lost fraction. + pub(super) fn mul<'a>( + dst: &mut [Limb], + exp: &mut ExpInt, + mut a: &'a [Limb], + mut b: &'a [Limb], + precision: usize, + ) -> Loss { + // Put the narrower number on the `a` for less loops below. + if a.len() > b.len() { + mem::swap(&mut a, &mut b); + } + + for x in &mut dst[..b.len()] { + *x = 0; + } + + for i in 0..a.len() { + let mut carry = 0; + for j in 0..b.len() { + let [low, mut high] = widening_mul(a[i], b[j]); + + // Now add carry. + let (low, overflow) = low.overflowing_add(carry); + high += overflow as Limb; + + // And now `dst[i + j]`, and store the new low part there. + let (low, overflow) = low.overflowing_add(dst[i + j]); + high += overflow as Limb; + + dst[i + j] = low; + carry = high; + } + dst[i + b.len()] = carry; + } + + // Assume the operands involved in the multiplication are single-precision + // FP, and the two multiplicants are: + // a = a23 . a22 ... a0 * 2^e1 + // b = b23 . 
b22 ... b0 * 2^e2
+ // the result of multiplication is:
+ // dst = c48 c47 c46 . c45 ... c0 * 2^(e1+e2)
+ // Note that there are three significant bits at the left-hand side of the
+ // radix point: two for the multiplication, and an overflow bit for the
+ // addition (that will always be zero at this point). Move the radix point
+ // toward left by two bits, and adjust exponent accordingly.
+ *exp += 2;
+
+ // Convert the result having "2 * precision" significant-bits back to the one
+ // having "precision" significant-bits. First, move the radix point from
+ // position "2*precision - 1" to "precision - 1". The exponent needs to be
+ // adjusted by "2*precision - 1" - "precision - 1" = "precision".
+ *exp -= precision as ExpInt + 1;
+
+ // In case MSB resides at the left-hand side of radix point, shift the
+ // mantissa right by some amount to make sure the MSB reside right before
+ // the radix point (i.e. "MSB . rest-significant-bits").
+ //
+ // Note that the result is not normalized when "omsb < precision". So, the
+ // caller needs to call IeeeFloat::normalize() if normalized value is
+ // expected.
+ let omsb = omsb(dst);
+ if omsb <= precision {
+ Loss::ExactlyZero
+ } else {
+ shift_right(dst, exp, omsb - precision)
+ }
+ }
+
+ /// `quotient = dividend / divisor`. Returns the lost fraction.
+ /// Does not preserve `dividend` or `divisor`.
+ pub(super) fn div(
+ quotient: &mut [Limb],
+ exp: &mut ExpInt,
+ dividend: &mut [Limb],
+ divisor: &mut [Limb],
+ precision: usize,
+ ) -> Loss {
+ // Zero the quotient before setting bits in it.
+ for x in &mut quotient[..limbs_for_bits(precision)] {
+ *x = 0;
+ }
+
+ // Normalize the divisor.
+ let bits = precision - omsb(divisor);
+ shift_left(divisor, &mut 0, bits);
+ *exp += bits as ExpInt;
+
+ // Normalize the dividend.
+ let bits = precision - omsb(dividend);
+ shift_left(dividend, exp, bits);
+
+ // Ensure the dividend >= divisor initially for the loop below.
+ // Incidentally, this means that the division loop below is + // guaranteed to set the integer bit to one. + if cmp(dividend, divisor) == Ordering::Less { + shift_left(dividend, exp, 1); + assert_ne!(cmp(dividend, divisor), Ordering::Less) + } + + // Long division. + for bit in (0..precision).rev() { + if cmp(dividend, divisor) != Ordering::Less { + sub(dividend, divisor, 0); + set_bit(quotient, bit); + } + shift_left(dividend, &mut 0, 1); + } + + // Figure out the lost fraction. + match cmp(dividend, divisor) { + Ordering::Greater => Loss::MoreThanHalf, + Ordering::Equal => Loss::ExactlyHalf, + Ordering::Less => { + if is_all_zeros(dividend) { + Loss::ExactlyZero + } else { + Loss::LessThanHalf + } + } + } } } diff --git a/src/librustc_apfloat/lib.rs b/src/librustc_apfloat/lib.rs index eb372b52c0a1..d9dbf7878565 100644 --- a/src/librustc_apfloat/lib.rs +++ b/src/librustc_apfloat/lib.rs @@ -48,6 +48,8 @@ #![feature(const_fn)] #![feature(i128_type)] +#![feature(slice_patterns)] +#![feature(try_from)] #[macro_use] extern crate rustc_bitflags; diff --git a/src/librustc_apfloat/ppc.rs b/src/librustc_apfloat/ppc.rs index 03c4830d49e8..dec88eb62cc6 100644 --- a/src/librustc_apfloat/ppc.rs +++ b/src/librustc_apfloat/ppc.rs @@ -8,7 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use {ieee, Category, ExpInt, Float, Round, ParseError, StatusAnd}; +use {Category, ExpInt, Float, FloatConvert, Round, ParseError, Status, StatusAnd}; +use ieee; use std::cmp::Ordering; use std::fmt; @@ -49,24 +50,99 @@ impl ieee::Semantics for FallbackS { const MIN_EXP: ExpInt = F::MIN_EXP as ExpInt + F::PRECISION as ExpInt; } +// Convert number to F. To avoid spurious underflows, we re- +// normalize against the F exponent range first, and only *then* +// truncate the mantissa. The result of that second conversion +// may be inexact, but should never underflow. 
+// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds. +pub struct FallbackExtendedS(F); +type FallbackExtended = ieee::IeeeFloat>; +impl ieee::Semantics for FallbackExtendedS { + // Forbid any conversion to/from bits. + const BITS: usize = 0; + const PRECISION: usize = Fallback::::PRECISION; + const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt; +} + +impl From> for DoubleFloat +where + F: FloatConvert>, + FallbackExtended: FloatConvert, +{ + fn from(x: Fallback) -> Self { + let mut status; + let mut loses_info = false; + + let extended: FallbackExtended = unpack!(status=, x.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + + let a = unpack!(status=, extended.convert(&mut loses_info)); + assert_eq!(status - Status::INEXACT, Status::OK); + + // If conversion was exact or resulted in a special case, we're done; + // just set the second double to zero. Otherwise, re-convert back to + // the extended format and compute the difference. This now should + // convert exactly to double. + let b = if a.is_finite_non_zero() && loses_info { + let u: FallbackExtended = unpack!(status=, a.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + let v = unpack!(status=, extended - u); + assert_eq!(status, Status::OK); + let v = unpack!(status=, v.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + v + } else { + F::ZERO + }; + + DoubleFloat(a, b) + } +} + +impl> From> for Fallback { + fn from(DoubleFloat(a, b): DoubleFloat) -> Self { + let mut status; + let mut loses_info = false; + + // Get the first F and convert to our format. + let a = unpack!(status=, a.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + + // Unless we have a special case, add in second F. 
+ if a.is_finite_non_zero() { + let b = unpack!(status=, b.convert(&mut loses_info)); + assert_eq!((status, loses_info), (Status::OK, false)); + + (a + b).value + } else { + a + } + } +} + float_common_impls!(DoubleFloat); impl Neg for DoubleFloat { type Output = Self; fn neg(self) -> Self { - panic!("NYI Neg::neg"); + if self.1.is_finite_non_zero() { + DoubleFloat(-self.0, -self.1) + } else { + DoubleFloat(-self.0, self.1) + } } } -#[allow(unused)] -impl fmt::Display for DoubleFloat { +impl>> fmt::Display for DoubleFloat { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - panic!("NYI Display::fmt"); + fmt::Display::fmt(&Fallback::from(*self), f) } } -#[allow(unused)] -impl Float for DoubleFloat { +impl>> Float for DoubleFloat +where + Self: From>, +{ const BITS: usize = F::BITS * 2; const PRECISION: usize = Fallback::::PRECISION; const MAX_EXP: ExpInt = Fallback::::MAX_EXP; @@ -80,108 +156,306 @@ impl Float for DoubleFloat { const NAN: Self = DoubleFloat(F::NAN, F::ZERO); fn qnan(payload: Option) -> Self { - panic!("NYI qnan") + DoubleFloat(F::qnan(payload), F::ZERO) } fn snan(payload: Option) -> Self { - panic!("NYI snan") + DoubleFloat(F::snan(payload), F::ZERO) } fn largest() -> Self { - panic!("NYI largest") + let status; + let mut r = DoubleFloat(F::largest(), F::largest()); + r.1 = r.1.scalbn(-(F::PRECISION as ExpInt + 1)); + r.1 = unpack!(status=, r.1.next_down()); + assert_eq!(status, Status::OK); + r } const SMALLEST: Self = DoubleFloat(F::SMALLEST, F::ZERO); fn smallest_normalized() -> Self { - panic!("NYI smallest_normalized") + DoubleFloat( + F::smallest_normalized().scalbn(F::PRECISION as ExpInt), + F::ZERO, + ) } - fn add_r(self, rhs: Self, round: Round) -> StatusAnd { - panic!("NYI add_r") + // Implement addition, subtraction, multiplication and division based on: + // "Software for Doubled-Precision Floating-Point Computations", + // by Seppo Linnainmaa, ACM TOMS vol 7 no 3, September 1981, pages 272-283. 
+ + fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd { + match (self.category(), rhs.category()) { + (Category::Infinity, Category::Infinity) => { + if self.is_negative() != rhs.is_negative() { + Status::INVALID_OP.and(Self::NAN.copy_sign(self)) + } else { + Status::OK.and(self) + } + } + + (_, Category::Zero) | + (Category::NaN, _) | + (Category::Infinity, Category::Normal) => Status::OK.and(self), + + (Category::Zero, _) | + (_, Category::NaN) | + (_, Category::Infinity) => Status::OK.and(rhs), + + (Category::Normal, Category::Normal) => { + let mut status = Status::OK; + let (a, aa, c, cc) = (self.0, self.1, rhs.0, rhs.1); + let mut z = a; + z = unpack!(status|=, z.add_r(c, round)); + if !z.is_finite() { + if !z.is_infinite() { + return status.and(DoubleFloat(z, F::ZERO)); + } + status = Status::OK; + let a_cmp_c = a.cmp_abs_normal(c); + z = cc; + z = unpack!(status|=, z.add_r(aa, round)); + if a_cmp_c == Ordering::Greater { + // z = cc + aa + c + a; + z = unpack!(status|=, z.add_r(c, round)); + z = unpack!(status|=, z.add_r(a, round)); + } else { + // z = cc + aa + a + c; + z = unpack!(status|=, z.add_r(a, round)); + z = unpack!(status|=, z.add_r(c, round)); + } + if !z.is_finite() { + return status.and(DoubleFloat(z, F::ZERO)); + } + self.0 = z; + let mut zz = aa; + zz = unpack!(status|=, zz.add_r(cc, round)); + if a_cmp_c == Ordering::Greater { + // self.1 = a - z + c + zz; + self.1 = a; + self.1 = unpack!(status|=, self.1.sub_r(z, round)); + self.1 = unpack!(status|=, self.1.add_r(c, round)); + self.1 = unpack!(status|=, self.1.add_r(zz, round)); + } else { + // self.1 = c - z + a + zz; + self.1 = c; + self.1 = unpack!(status|=, self.1.sub_r(z, round)); + self.1 = unpack!(status|=, self.1.add_r(a, round)); + self.1 = unpack!(status|=, self.1.add_r(zz, round)); + } + } else { + // q = a - z; + let mut q = a; + q = unpack!(status|=, q.sub_r(z, round)); + + // zz = q + c + (a - (q + z)) + aa + cc; + // Compute a - (q + z) as -((q + z) - a) to avoid 
temporary copies. + let mut zz = q; + zz = unpack!(status|=, zz.add_r(c, round)); + q = unpack!(status|=, q.add_r(z, round)); + q = unpack!(status|=, q.sub_r(a, round)); + q = -q; + zz = unpack!(status|=, zz.add_r(q, round)); + zz = unpack!(status|=, zz.add_r(aa, round)); + zz = unpack!(status|=, zz.add_r(cc, round)); + if zz.is_zero() && !zz.is_negative() { + return Status::OK.and(DoubleFloat(z, F::ZERO)); + } + self.0 = z; + self.0 = unpack!(status|=, self.0.add_r(zz, round)); + if !self.0.is_finite() { + self.1 = F::ZERO; + return status.and(self); + } + self.1 = z; + self.1 = unpack!(status|=, self.1.sub_r(self.0, round)); + self.1 = unpack!(status|=, self.1.add_r(zz, round)); + } + status.and(self) + } + } } - fn mul_r(self, rhs: Self, round: Round) -> StatusAnd { - panic!("NYI mul_r") + fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd { + // Interesting observation: For special categories, finding the lowest + // common ancestor of the following layered graph gives the correct + // return category: + // + // NaN + // / \ + // Zero Inf + // \ / + // Normal + // + // e.g. NaN * NaN = NaN + // Zero * Inf = NaN + // Normal * Zero = Zero + // Normal * Inf = Inf + match (self.category(), rhs.category()) { + (Category::NaN, _) => Status::OK.and(self), + + (_, Category::NaN) => Status::OK.and(rhs), + + (Category::Zero, Category::Infinity) | + (Category::Infinity, Category::Zero) => Status::OK.and(Self::NAN), + + (Category::Zero, _) | + (Category::Infinity, _) => Status::OK.and(self), + + (_, Category::Zero) | + (_, Category::Infinity) => Status::OK.and(rhs), + + (Category::Normal, Category::Normal) => { + let mut status = Status::OK; + let (a, b, c, d) = (self.0, self.1, rhs.0, rhs.1); + // t = a * c + let mut t = a; + t = unpack!(status|=, t.mul_r(c, round)); + if !t.is_finite_non_zero() { + return status.and(DoubleFloat(t, F::ZERO)); + } + + // tau = fmsub(a, c, t), that is -fmadd(-a, c, t). 
+ let mut tau = a; + tau = unpack!(status|=, tau.mul_add_r(c, -t, round)); + // v = a * d + let mut v = a; + v = unpack!(status|=, v.mul_r(d, round)); + // w = b * c + let mut w = b; + w = unpack!(status|=, w.mul_r(c, round)); + v = unpack!(status|=, v.add_r(w, round)); + // tau += v + w + tau = unpack!(status|=, tau.add_r(v, round)); + // u = t + tau + let mut u = t; + u = unpack!(status|=, u.add_r(tau, round)); + + self.0 = u; + if !u.is_finite() { + self.1 = F::ZERO; + } else { + // self.1 = (t - u) + tau + t = unpack!(status|=, t.sub_r(u, round)); + t = unpack!(status|=, t.add_r(tau, round)); + self.1 = t; + } + status.and(self) + } + } } fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd { - panic!("NYI mul_add_r") + Fallback::from(self) + .mul_add_r(Fallback::from(multiplicand), Fallback::from(addend), round) + .map(Self::from) } fn div_r(self, rhs: Self, round: Round) -> StatusAnd { - panic!("NYI div_r") + Fallback::from(self).div_r(Fallback::from(rhs), round).map( + Self::from, + ) } fn c_fmod(self, rhs: Self) -> StatusAnd { - panic!("NYI c_fmod") + Fallback::from(self).c_fmod(Fallback::from(rhs)).map( + Self::from, + ) } fn round_to_integral(self, round: Round) -> StatusAnd { - panic!("NYI round_to_integral") + Fallback::from(self).round_to_integral(round).map( + Self::from, + ) } fn next_up(self) -> StatusAnd { - panic!("NYI next_up") + Fallback::from(self).next_up().map(Self::from) } fn from_bits(input: u128) -> Self { - panic!("NYI from_bits") + let (a, b) = (input, input >> F::BITS); + DoubleFloat( + F::from_bits(a & ((1 << F::BITS) - 1)), + F::from_bits(b & ((1 << F::BITS) - 1)), + ) } fn from_u128_r(input: u128, round: Round) -> StatusAnd { - panic!("NYI from_u128_r") + Fallback::from_u128_r(input, round).map(Self::from) } fn from_str_r(s: &str, round: Round) -> Result, ParseError> { - panic!("NYI from_str_r") + Fallback::from_str_r(s, round).map(|r| r.map(Self::from)) } fn to_bits(self) -> u128 { - panic!("NYI to_bits") 
+ self.0.to_bits() | (self.1.to_bits() << F::BITS) } fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd { - panic!("NYI to_u128_r"); + Fallback::from(self).to_u128_r(width, round, is_exact) } fn cmp_abs_normal(self, rhs: Self) -> Ordering { - panic!("NYI cmp_abs_normal") + self.0.cmp_abs_normal(rhs.0).then_with(|| { + let result = self.1.cmp_abs_normal(rhs.1); + if result != Ordering::Equal { + let against = self.0.is_negative() ^ self.1.is_negative(); + let rhs_against = rhs.0.is_negative() ^ rhs.1.is_negative(); + (!against).cmp(&!rhs_against).then_with(|| if against { + result.reverse() + } else { + result + }) + } else { + result + } + }) } fn bitwise_eq(self, rhs: Self) -> bool { - panic!("NYI bitwise_eq") + self.0.bitwise_eq(rhs.0) && self.1.bitwise_eq(rhs.1) } fn is_negative(self) -> bool { - panic!("NYI is_negative") + self.0.is_negative() } fn is_denormal(self) -> bool { - panic!("NYI is_denormal") + self.category() == Category::Normal && + (self.0.is_denormal() || self.1.is_denormal() || + // (double)(Hi + Lo) == Hi defines a normal number.
+ !(self.0 + self.1).value.bitwise_eq(self.0)) } fn is_signaling(self) -> bool { - panic!("NYI is_signaling") + self.0.is_signaling() } fn category(self) -> Category { - panic!("NYI category") + self.0.category() } fn get_exact_inverse(self) -> Option { - panic!("NYI get_exact_inverse") + Fallback::from(self).get_exact_inverse().map(Self::from) } fn ilogb(self) -> ExpInt { - panic!("NYI ilogb") + self.0.ilogb() } fn scalbn_r(self, exp: ExpInt, round: Round) -> Self { - panic!("NYI scalbn") + DoubleFloat(self.0.scalbn_r(exp, round), self.1.scalbn_r(exp, round)) } fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self { - panic!("NYI frexp") + let a = self.0.frexp_r(exp, round); + let mut b = self.1; + if self.category() == Category::Normal { + b = b.scalbn_r(-*exp, round); + } + DoubleFloat(a, b) } } From 321a72c1c1329415c6b94cc18b42b05c1ce8b59d Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 1 Aug 2017 18:26:28 -0700 Subject: [PATCH 150/213] closure unsafety check: stop moving up when we hit an item --- src/librustc_mir/transform/add_validation.rs | 30 ++++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index 2afaa0701181..bbd2829c303e 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -128,29 +128,29 @@ fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> let mut cur = fn_like.id(); loop { // Go further upwards. 
- let parent = tcx.hir.get_parent_node(cur); - if cur == parent { - bug!("Closures muts be inside a non-closure fn_like"); + cur = tcx.hir.get_parent_node(cur); + let node = tcx.hir.get(cur); + // Check if this is an unsafe function + if let Some(fn_like) = FnLikeNode::from_node(node) { + if !fn_is_closure(fn_like) { + if fn_like.unsafety() == hir::Unsafety::Unsafe { + return true; + } + } } - cur = parent; - // Check if this is an unsafe block - match tcx.hir.find(cur) { - Some(Node::NodeExpr(&hir::Expr { node: hir::ExprBlock(ref block), ..})) => { + // Check if this is an unsafe block, or an item + match node { + Node::NodeExpr(&hir::Expr { node: hir::ExprBlock(ref block), ..}) => { if block_is_unsafe(&*block) { // Found an unsafe block, we can bail out here. return true; } } - _ => {}, - } - // Check if this is a non-closure fn_like, at which point we have to stop moving up - if let Some(fn_like) = FnLikeNode::from_node(tcx.hir.get(cur)) { - if !fn_is_closure(fn_like) { - if fn_like.unsafety() == hir::Unsafety::Unsafe { - return true; - } + Node::NodeItem(..) => { + // No walking up beyond items. This makes sure the loop always terminates. break; } + _ => {}, } } } From 92836e397c5935c7cd8a7529de03bc5649931fa9 Mon Sep 17 00:00:00 2001 From: Daan Sprenkels Date: Wed, 2 Aug 2017 03:44:11 +0200 Subject: [PATCH 151/213] Add regression test for #40510 --- src/test/compile-fail/issue-40510.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 src/test/compile-fail/issue-40510.rs diff --git a/src/test/compile-fail/issue-40510.rs b/src/test/compile-fail/issue-40510.rs new file mode 100644 index 000000000000..142092ff41eb --- /dev/null +++ b/src/test/compile-fail/issue-40510.rs @@ -0,0 +1,23 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(rustc_attrs)] +#![allow(unused)] + +fn f() { + let mut x: Box<()> = Box::new(()); + + || { + &mut x + }; +} + +#[rustc_error] +fn main() {} //~ ERROR compilation successful From 5c4c2d9235c5de88a217f03768cf6bc950fe6890 Mon Sep 17 00:00:00 2001 From: QuietMisdreavus Date: Tue, 1 Aug 2017 17:08:33 -0500 Subject: [PATCH 152/213] rustdoc: shrink headings in non-top-level docblocks --- src/librustdoc/html/static/rustdoc.css | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/librustdoc/html/static/rustdoc.css b/src/librustdoc/html/static/rustdoc.css index 858ef3bf411d..b6a342ea3b0d 100644 --- a/src/librustdoc/html/static/rustdoc.css +++ b/src/librustdoc/html/static/rustdoc.css @@ -274,9 +274,13 @@ nav.sub { border-bottom: 1px solid; } -.docblock h1 { font-size: 1.3em; } -.docblock h2 { font-size: 1.15em; } -.docblock h3, .docblock h4, .docblock h5 { font-size: 1em; } +#main > .docblock h1 { font-size: 1.3em; } +#main > .docblock h2 { font-size: 1.15em; } +#main > .docblock h3, #main > .docblock h4, #main > .docblock h5 { font-size: 1em; } + +.docblock h1 { font-size: 1em; } +.docblock h2 { font-size: 0.95em; } +.docblock h3, .docblock h4, .docblock h5 { font-size: 0.9em; } .docblock { margin-left: 24px; From e92ddbf5f7b9b1c6d61f3b6138035dfd5eee2c68 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 1 Aug 2017 19:13:47 -0700 Subject: [PATCH 153/213] improve hir::map::Map::get_parent_node doc --- src/librustc/hir/map/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs index 3fdd9c34f46d..45b1d6c18410 100644 --- a/src/librustc/hir/map/mod.rs +++ b/src/librustc/hir/map/mod.rs @@ -553,7 +553,9 @@ impl<'hir> Map<'hir> { } /// Similar to get_parent, returns the 
parent node id or id if there is no - /// parent. + /// parent. Note that the parent may be CRATE_NODE_ID, which is not itself + /// present in the map -- so passing the return value of get_parent_node to + /// get may actually panic. /// This function returns the immediate parent in the AST, whereas get_parent /// returns the enclosing item. Note that this might not be the actual parent /// node in the AST - some kinds of nodes are not in the map and these will @@ -629,7 +631,7 @@ impl<'hir> Map<'hir> { } /// Retrieve the NodeId for `id`'s enclosing method, unless there's a - /// `while` or `loop` before reacing it, as block tail returns are not + /// `while` or `loop` before reaching it, as block tail returns are not /// available in them. /// /// ``` From fefe63ce04d61ade8d8335fbf384775d12117fd5 Mon Sep 17 00:00:00 2001 From: Mario Idival Date: Wed, 2 Aug 2017 00:06:42 -0300 Subject: [PATCH 154/213] Remove unnecessary test code #43219 --- src/test/run-pass/core-run-destroy.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/test/run-pass/core-run-destroy.rs b/src/test/run-pass/core-run-destroy.rs index c5b5b6b24ab1..22fbeb2d5d0e 100644 --- a/src/test/run-pass/core-run-destroy.rs +++ b/src/test/run-pass/core-run-destroy.rs @@ -15,7 +15,6 @@ // memory, which makes for some *confusing* logs. That's why these are here // instead of in std.
-#![reexport_test_harness_main = "test_main"] #![feature(libc, std_misc, duration)] extern crate libc; From 4e6c1dddff853703bd31a8f3390e8aa2d4856fe8 Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Wed, 2 Aug 2017 15:46:59 +1200 Subject: [PATCH 155/213] save-analysis: only emit public fields in value of a struct if the config permits --- src/librustc_save_analysis/dump_visitor.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs index f74e8cb21608..4cfd570adead 100644 --- a/src/librustc_save_analysis/dump_visitor.rs +++ b/src/librustc_save_analysis/dump_visitor.rs @@ -560,14 +560,20 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { let (value, fields) = if let ast::ItemKind::Struct(ast::VariantData::Struct(ref fields, _), _) = item.node { + let include_priv_fields = !self.save_ctxt.config.pub_only; let fields_str = fields.iter() .enumerate() - .map(|(i, f)| f.ident.map(|i| i.to_string()) - .unwrap_or(i.to_string())) + .filter_map(|(i, f)| { + if include_priv_fields || f.vis == ast::Visibility::Public { + f.ident.map(|i| i.to_string()).or_else(|| Some(i.to_string())) + } else { + None + } + }) .collect::>() .join(", "); - (format!("{} {{ {} }}", name, fields_str), - fields.iter().map(|f| ::id_from_node_id(f.id, &self.save_ctxt)).collect()) + let value = format!("{} {{ {} }}", name, fields_str); + (value, fields.iter().map(|f| ::id_from_node_id(f.id, &self.save_ctxt)).collect()) } else { (String::new(), vec![]) }; From 7d8dc7a979975ab6d8aab29cfa0b69e8a64f1280 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 1 Aug 2017 21:06:33 -0700 Subject: [PATCH 156/213] also release-validate return value before a call --- src/librustc_mir/transform/add_validation.rs | 21 ++++++++++++-------- src/test/mir-opt/validate_1.rs | 2 +- src/test/mir-opt/validate_3.rs | 2 +- src/test/mir-opt/validate_5.rs | 2 +- 4 files changed, 16 
insertions(+), 11 deletions(-) diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs index bbd2829c303e..52c2eaa7cb63 100644 --- a/src/librustc_mir/transform/add_validation.rs +++ b/src/librustc_mir/transform/add_validation.rs @@ -248,18 +248,23 @@ impl MirPass for AddValidation { match block_data.terminator { Some(Terminator { kind: TerminatorKind::Call { ref args, ref destination, .. }, source_info }) => { - // Before the call: Release all arguments + // Before the call: Release all arguments *and* the return value. + // The callee may write into the return value! Note that this relies + // on "release of uninitialized" to be a NOP. if !restricted_validation { let release_stmt = Statement { source_info, kind: StatementKind::Validate(ValidationOp::Release, - args.iter().filter_map(|op| { - match op { - &Operand::Consume(ref lval) => - Some(lval_to_operand(lval.clone())), - &Operand::Constant(..) => { None }, - } - }).collect()) + destination.iter().map(|dest| lval_to_operand(dest.0.clone())) + .chain( + args.iter().filter_map(|op| { + match op { + &Operand::Consume(ref lval) => + Some(lval_to_operand(lval.clone())), + &Operand::Constant(..) 
=> { None }, + } + }) + ).collect()) }; block_data.statements.push(release_stmt); } diff --git a/src/test/mir-opt/validate_1.rs b/src/test/mir-opt/validate_1.rs index 542ba87fef4b..9ac76a5f4ea6 100644 --- a/src/test/mir-opt/validate_1.rs +++ b/src/test/mir-opt/validate_1.rs @@ -46,7 +46,7 @@ fn main() { // Validate(Suspend(ReScope(Misc(NodeId(34)))), [(*_6): i32/ReScope(Misc(NodeId(34)))]); // _5 = &ReErased mut (*_6); // Validate(Acquire, [(*_5): i32/ReScope(Misc(NodeId(34)))]); -// Validate(Release, [_3: &ReScope(Misc(NodeId(34))) Test, _5: &ReScope(Misc(NodeId(34))) mut i32]); +// Validate(Release, [_2: (), _3: &ReScope(Misc(NodeId(34))) Test, _5: &ReScope(Misc(NodeId(34))) mut i32]); // _2 = const Test::foo(_3, _5) -> bb1; // } // diff --git a/src/test/mir-opt/validate_3.rs b/src/test/mir-opt/validate_3.rs index 100fae5c6781..9140cf5768f5 100644 --- a/src/test/mir-opt/validate_3.rs +++ b/src/test/mir-opt/validate_3.rs @@ -38,7 +38,7 @@ fn main() { // Validate(Suspend(ReScope(Misc(NodeId(46)))), [(*_5): i32/ReScope(Misc(NodeId(46))) (imm)]); // _4 = &ReErased (*_5); // Validate(Acquire, [(*_4): i32/ReScope(Misc(NodeId(46))) (imm)]); -// Validate(Release, [_4: &ReScope(Misc(NodeId(46))) i32]); +// Validate(Release, [_3: (), _4: &ReScope(Misc(NodeId(46))) i32]); // _3 = const foo(_4) -> bb1; // } // bb1: { diff --git a/src/test/mir-opt/validate_5.rs b/src/test/mir-opt/validate_5.rs index 1831f9dd713f..e9919af9fd3a 100644 --- a/src/test/mir-opt/validate_5.rs +++ b/src/test/mir-opt/validate_5.rs @@ -37,7 +37,7 @@ fn main() { // fn test(_1: &ReErased mut i32) -> () { // bb0: { // Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), node: DefIndex(4) => validate_5/8cd878b::test[0] }, BrAnon(0)) mut i32]); -// Validate(Release, [_4: *mut i32]); +// Validate(Release, [_3: bool, _4: *mut i32]); // _3 = const write_42(_4) -> bb1; // } // } From 8c1699d8740c22397a56b3416a365a6083307c4b Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Wed, 2 Aug 2017 16:57:50 +1200 
Subject: [PATCH 157/213] Update rls-data dep --- src/Cargo.lock | 12 +++++++++++- src/librustc_save_analysis/Cargo.toml | 2 +- src/librustc_save_analysis/json_dumper.rs | 10 +++++++--- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/Cargo.lock b/src/Cargo.lock index 5f363cb4c487..9db21d5827d7 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1174,6 +1174,15 @@ dependencies = [ "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rls-data" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rls-span" version = "0.4.0" @@ -1504,7 +1513,7 @@ name = "rustc_save_analysis" version = "0.0.0" dependencies = [ "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2220,6 +2229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" "checksum rls-analysis 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0127cfae9c726461facbbbc8327e782adf8afd61f7fcc6adf8ea9ad8fc428ed0" +"checksum rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"11d339f1888e33e74d8032de0f83c40b2bdaaaf04a8cfc03b32186c3481fb534" "checksum rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f274ec7f966337dc2601fe9bde060b551d1293c277af782dc65cd7200ca070c0" "checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a" "checksum rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd34691a510938bb67fe0444fb363103c73ffb31c121d1e16bc92d8945ea8ff" diff --git a/src/librustc_save_analysis/Cargo.toml b/src/librustc_save_analysis/Cargo.toml index 00b01994eb8b..aa249af363f4 100644 --- a/src/librustc_save_analysis/Cargo.toml +++ b/src/librustc_save_analysis/Cargo.toml @@ -15,7 +15,7 @@ rustc_data_structures = { path = "../librustc_data_structures" } rustc_typeck = { path = "../librustc_typeck" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } -rls-data = "0.9" +rls-data = "0.10" rls-span = "0.4" # FIXME(#40527) should move rustc serialize out of tree rustc-serialize = "0.3" diff --git a/src/librustc_save_analysis/json_dumper.rs b/src/librustc_save_analysis/json_dumper.rs index 60cec4c5e464..30a698e63514 100644 --- a/src/librustc_save_analysis/json_dumper.rs +++ b/src/librustc_save_analysis/json_dumper.rs @@ -51,7 +51,11 @@ impl<'b> DumpOutput for CallbackOutput<'b> { impl<'b, W: Write> JsonDumper> { pub fn new(writer: &'b mut W, config: Config) -> JsonDumper> { - JsonDumper { output: WriteOutput { output: writer }, config, result: Analysis::new() } + JsonDumper { + output: WriteOutput { output: writer }, + config: config.clone(), + result: Analysis::new(config) + } } } @@ -61,8 +65,8 @@ impl<'b> JsonDumper> { -> JsonDumper> { JsonDumper { output: CallbackOutput { callback: callback }, - config, - result: Analysis::new(), + config: config.clone(), + result: Analysis::new(config), } } } From 5abbf798e7dc1a9bfbe287d3d3d19e764b28225b Mon Sep 17 00:00:00 2001 From: Nick Cameron 
Date: Wed, 2 Aug 2017 17:21:12 +1200 Subject: [PATCH 158/213] Update RLS --- src/Cargo.lock | 22 +++++++--------------- src/tools/rls | 2 +- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/Cargo.lock b/src/Cargo.lock index 9db21d5827d7..2b8591ab88e1 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1140,8 +1140,8 @@ dependencies = [ "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "racer 2.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-analysis 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-analysis 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "rustfmt-nightly 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1155,21 +1155,12 @@ dependencies = [ [[package]] name = "rls-analysis" -version = "0.4.5" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "derive-new 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rls-data" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ + "rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 
0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1181,6 +1172,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2228,9 +2221,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" "checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" -"checksum rls-analysis 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0127cfae9c726461facbbbc8327e782adf8afd61f7fcc6adf8ea9ad8fc428ed0" +"checksum rls-analysis 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca5b4d890953b9cc60c8c97f196921d02edf75798ccab930604aa3b4f890616d" "checksum rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "11d339f1888e33e74d8032de0f83c40b2bdaaaf04a8cfc03b32186c3481fb534" -"checksum rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f274ec7f966337dc2601fe9bde060b551d1293c277af782dc65cd7200ca070c0" "checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a" "checksum rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd34691a510938bb67fe0444fb363103c73ffb31c121d1e16bc92d8945ea8ff" "checksum rustc-demangle 0.1.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "3058a43ada2c2d0b92b3ae38007a2d0fa5e9db971be260e0171408a4ff471c95" diff --git a/src/tools/rls b/src/tools/rls index 06b48d1c97dd..cb8a5900fd3b 160000 --- a/src/tools/rls +++ b/src/tools/rls @@ -1 +1 @@ -Subproject commit 06b48d1c97dd69968a24b4f506e85e3a3efb7dea +Subproject commit cb8a5900fd3b5907b2bac07ca9832f91fed29750 From b2c3a413b955ac89be06367f4db7706cbd88dc9c Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Wed, 2 Aug 2017 11:56:23 +0200 Subject: [PATCH 159/213] incr.comp.: Properly incorporate symbol linkage and visibility into CGU hash. --- src/librustc_trans/base.rs | 2 +- src/librustc_trans/partitioning.rs | 21 ++++----------------- 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 14c73de64bc7..49a2885747f6 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -1172,7 +1172,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let cgu_name = String::from(cgu.name()); let cgu_id = cgu.work_product_id(); - let symbol_name_hash = cgu.compute_symbol_name_hash(scx, &exported_symbols); + let symbol_name_hash = cgu.compute_symbol_name_hash(scx); // Check whether there is a previous work-product we can // re-use. 
Not only must the file exist, and the inputs not diff --git a/src/librustc_trans/partitioning.rs b/src/librustc_trans/partitioning.rs index 904cfb2acd74..cff0eca02c60 100644 --- a/src/librustc_trans/partitioning.rs +++ b/src/librustc_trans/partitioning.rs @@ -174,29 +174,16 @@ impl<'tcx> CodegenUnit<'tcx> { } pub fn compute_symbol_name_hash<'a>(&self, - scx: &SharedCrateContext<'a, 'tcx>, - exported_symbols: &ExportedSymbols) + scx: &SharedCrateContext<'a, 'tcx>) -> u64 { let mut state = IchHasher::new(); - let exported_symbols = exported_symbols.local_exports(); let all_items = self.items_in_deterministic_order(scx.tcx()); - for (item, _) in all_items { + for (item, (linkage, visibility)) in all_items { let symbol_name = item.symbol_name(scx.tcx()); symbol_name.len().hash(&mut state); symbol_name.hash(&mut state); - let exported = match item { - TransItem::Fn(ref instance) => { - let node_id = - scx.tcx().hir.as_local_node_id(instance.def_id()); - node_id.map(|node_id| exported_symbols.contains(&node_id)) - .unwrap_or(false) - } - TransItem::Static(node_id) => { - exported_symbols.contains(&node_id) - } - TransItem::GlobalAsm(..) => true, - }; - exported.hash(&mut state); + linkage.hash(&mut state); + visibility.hash(&mut state); } state.finish().to_smaller_hash() } From 9861df47601cf6cb105d390db6c3a753dea7622e Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 30 Jul 2017 07:32:11 +0300 Subject: [PATCH 160/213] rustc_const_math: use apfloat::ieee::{Single,Double} in ConstFloat. 
--- src/librustc/ich/impls_const_math.rs | 6 +- src/librustc_const_eval/eval.rs | 55 ++++--- src/librustc_const_math/float.rs | 208 +++++++++++++++++--------- src/librustc_const_math/int.rs | 42 ------ src/librustc_const_math/lib.rs | 2 + src/librustc_trans/mir/constant.rs | 11 +- src/test/mir-opt/deaggregator_test.rs | 4 +- 7 files changed, 182 insertions(+), 146 deletions(-) diff --git a/src/librustc/ich/impls_const_math.rs b/src/librustc/ich/impls_const_math.rs index 6d11f2a87a41..6790c2ac7dec 100644 --- a/src/librustc/ich/impls_const_math.rs +++ b/src/librustc/ich/impls_const_math.rs @@ -11,9 +11,9 @@ //! This module contains `HashStable` implementations for various data types //! from `rustc_const_math` in no particular order. -impl_stable_hash_for!(enum ::rustc_const_math::ConstFloat { - F32(val), - F64(val) +impl_stable_hash_for!(struct ::rustc_const_math::ConstFloat { + ty, + bits }); impl_stable_hash_for!(enum ::rustc_const_math::ConstInt { diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs index 463f256fe6c6..eb45fd9c0e0a 100644 --- a/src/librustc_const_eval/eval.rs +++ b/src/librustc_const_eval/eval.rs @@ -26,6 +26,7 @@ use rustc::util::nodemap::DefIdMap; use syntax::abi::Abi; use syntax::ast; +use syntax::attr; use rustc::hir::{self, Expr}; use syntax_pos::Span; @@ -560,8 +561,15 @@ fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty::TyUint(ast::UintTy::Us) => { Ok(Integral(Usize(ConstUsize::new_truncating(v, tcx.sess.target.uint_type)))) }, - ty::TyFloat(ast::FloatTy::F64) => Ok(Float(F64(val.to_f64()))), - ty::TyFloat(ast::FloatTy::F32) => Ok(Float(F32(val.to_f32()))), + ty::TyFloat(fty) => { + if let Some(i) = val.to_u128() { + Ok(Float(ConstFloat::from_u128(i, fty))) + } else { + // The value must be negative, go through signed integers. 
+ let i = val.to_u128_unchecked() as i128; + Ok(Float(ConstFloat::from_i128(i, fty))) + } + } ty::TyRawPtr(_) => Err(ErrKind::UnimplementedConstVal("casting an address to a raw ptr")), ty::TyChar => match val { U8(u) => Ok(Char(u as char)), @@ -574,30 +582,25 @@ fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn cast_const_float<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, val: ConstFloat, ty: Ty<'tcx>) -> CastResult<'tcx> { + let int_width = |ty| { + ty::layout::Integer::from_attr(tcx, ty).size().bits() as usize + }; match ty.sty { - ty::TyInt(_) | ty::TyUint(_) => { - let i = match val { - F32(f) if f >= 0.0 => U128(f as u128), - F64(f) if f >= 0.0 => U128(f as u128), - - F32(f) => I128(f as i128), - F64(f) => I128(f as i128) - }; - - if let (I128(_), &ty::TyUint(_)) = (i, &ty.sty) { - return Err(CannotCast); + ty::TyInt(ity) => { + if let Some(i) = val.to_i128(int_width(attr::SignedInt(ity))) { + cast_const_int(tcx, I128(i), ty) + } else { + Err(CannotCast) } - - cast_const_int(tcx, i, ty) } - ty::TyFloat(ast::FloatTy::F64) => Ok(Float(F64(match val { - F32(f) => f as f64, - F64(f) => f - }))), - ty::TyFloat(ast::FloatTy::F32) => Ok(Float(F32(match val { - F64(f) => f as f32, - F32(f) => f - }))), + ty::TyUint(uty) => { + if let Some(i) = val.to_u128(int_width(attr::UnsignedInt(uty))) { + cast_const_int(tcx, U128(i), ty) + } else { + Err(CannotCast) + } + } + ty::TyFloat(fty) => Ok(Float(val.convert(fty))), _ => Err(CannotCast), } } @@ -691,11 +694,7 @@ fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind, fn parse_float<'tcx>(num: &str, fty: ast::FloatTy) -> Result> { - let val = match fty { - ast::FloatTy::F32 => num.parse::().map(F32), - ast::FloatTy::F64 => num.parse::().map(F64) - }; - val.map_err(|_| { + ConstFloat::from_str(num, fty).map_err(|_| { // FIXME(#31407) this is only necessary because float parsing is buggy UnimplementedConstVal("could not evaluate float literal (see issue #31407)") }) diff --git a/src/librustc_const_math/float.rs 
b/src/librustc_const_math/float.rs index f557edffbda4..719f6b6a7b32 100644 --- a/src/librustc_const_math/float.rs +++ b/src/librustc_const_math/float.rs @@ -9,102 +9,164 @@ // except according to those terms. use std::cmp::Ordering; -use std::hash; -use std::mem::transmute; +use std::num::ParseFloatError; + +use syntax::ast; + +use rustc_apfloat::{Float, FloatConvert, Status}; +use rustc_apfloat::ieee::{Single, Double}; use super::err::*; -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] -pub enum ConstFloat { - F32(f32), - F64(f64) +// Note that equality for `ConstFloat` means that the it is the same +// constant, not that the rust values are equal. In particular, `NaN +// == NaN` (at least if it's the same NaN; distinct encodings for NaN +// are considering unequal). +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct ConstFloat { + pub ty: ast::FloatTy, + + // This is a bit inefficient but it makes conversions below more + // ergonomic, and all of this will go away once `miri` is merged. + pub bits: u128, } -pub use self::ConstFloat::*; impl ConstFloat { /// Description of the type, not the value pub fn description(&self) -> &'static str { - match *self { - F32(_) => "f32", - F64(_) => "f64", - } + self.ty.ty_to_string() } pub fn is_nan(&self) -> bool { - match *self { - F32(f) => f.is_nan(), - F64(f) => f.is_nan(), + match self.ty { + ast::FloatTy::F32 => Single::from_bits(self.bits).is_nan(), + ast::FloatTy::F64 => Double::from_bits(self.bits).is_nan(), } } /// Compares the values if they are of the same type pub fn try_cmp(self, rhs: Self) -> Result { - match (self, rhs) { - (F64(a), F64(b)) => { + match (self.ty, rhs.ty) { + (ast::FloatTy::F64, ast::FloatTy::F64) => { + let a = Double::from_bits(self.bits); + let b = Double::from_bits(rhs.bits); // This is pretty bad but it is the existing behavior. 
- Ok(if a == b { - Ordering::Equal - } else if a < b { - Ordering::Less - } else { - Ordering::Greater - }) + Ok(a.partial_cmp(&b).unwrap_or(Ordering::Greater)) } - (F32(a), F32(b)) => { - Ok(if a == b { - Ordering::Equal - } else if a < b { - Ordering::Less - } else { - Ordering::Greater - }) + (ast::FloatTy::F32, ast::FloatTy::F32) => { + let a = Single::from_bits(self.bits); + let b = Single::from_bits(rhs.bits); + Ok(a.partial_cmp(&b).unwrap_or(Ordering::Greater)) } _ => Err(CmpBetweenUnequalTypes), } } -} -/// Note that equality for `ConstFloat` means that the it is the same -/// constant, not that the rust values are equal. In particular, `NaN -/// == NaN` (at least if it's the same NaN; distinct encodings for NaN -/// are considering unequal). -impl PartialEq for ConstFloat { - fn eq(&self, other: &Self) -> bool { - match (*self, *other) { - (F64(a), F64(b)) => { - unsafe{transmute::<_,u64>(a) == transmute::<_,u64>(b)} + pub fn from_i128(input: i128, ty: ast::FloatTy) -> Self { + let bits = match ty { + ast::FloatTy::F32 => Single::from_i128(input).value.to_bits(), + ast::FloatTy::F64 => Double::from_i128(input).value.to_bits() + }; + ConstFloat { bits, ty } + } + + pub fn from_u128(input: u128, ty: ast::FloatTy) -> Self { + let bits = match ty { + ast::FloatTy::F32 => Single::from_u128(input).value.to_bits(), + ast::FloatTy::F64 => Double::from_u128(input).value.to_bits() + }; + ConstFloat { bits, ty } + } + + pub fn from_str(num: &str, ty: ast::FloatTy) -> Result { + let bits = match ty { + ast::FloatTy::F32 => { + let rust_bits = num.parse::()?.to_bits() as u128; + let apfloat = num.parse::().unwrap_or_else(|e| { + panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e); + }); + let apfloat_bits = apfloat.to_bits(); + assert!(rust_bits == apfloat_bits, + "apfloat::ieee::Single gave different result for `{}`: \ + {}({:#x}) vs Rust's {}({:#x})", + num, apfloat, apfloat_bits, + Single::from_bits(rust_bits), rust_bits); + apfloat_bits } - (F32(a), 
F32(b)) => { - unsafe{transmute::<_,u32>(a) == transmute::<_,u32>(b)} + ast::FloatTy::F64 => { + let rust_bits = num.parse::()?.to_bits() as u128; + let apfloat = num.parse::().unwrap_or_else(|e| { + panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e); + }); + let apfloat_bits = apfloat.to_bits(); + assert!(rust_bits == apfloat_bits, + "apfloat::ieee::Double gave different result for `{}`: \ + {}({:#x}) vs Rust's {}({:#x})", + num, apfloat, apfloat_bits, + Double::from_bits(rust_bits), rust_bits); + apfloat_bits } - _ => false + }; + Ok(ConstFloat { bits, ty }) + } + + pub fn to_i128(self, width: usize) -> Option { + assert!(width <= 128); + let r = match self.ty { + ast::FloatTy::F32 => Single::from_bits(self.bits).to_i128(width), + ast::FloatTy::F64 => Double::from_bits(self.bits).to_i128(width) + }; + if r.status.intersects(Status::INVALID_OP) { + None + } else { + Some(r.value) } } -} -impl Eq for ConstFloat {} - -impl hash::Hash for ConstFloat { - fn hash(&self, state: &mut H) { - match *self { - F64(a) => { - unsafe { transmute::<_,u64>(a) }.hash(state) - } - F32(a) => { - unsafe { transmute::<_,u32>(a) }.hash(state) - } + pub fn to_u128(self, width: usize) -> Option { + assert!(width <= 128); + let r = match self.ty { + ast::FloatTy::F32 => Single::from_bits(self.bits).to_u128(width), + ast::FloatTy::F64 => Double::from_bits(self.bits).to_u128(width) + }; + if r.status.intersects(Status::INVALID_OP) { + None + } else { + Some(r.value) } } + + pub fn convert(self, to: ast::FloatTy) -> Self { + let bits = match (self.ty, to) { + (ast::FloatTy::F32, ast::FloatTy::F32) | + (ast::FloatTy::F64, ast::FloatTy::F64) => return self, + + (ast::FloatTy::F32, ast::FloatTy::F64) => { + Double::to_bits(Single::from_bits(self.bits).convert(&mut false).value) + } + (ast::FloatTy::F64, ast::FloatTy::F32) => { + Single::to_bits(Double::from_bits(self.bits).convert(&mut false).value) + } + }; + ConstFloat { bits, ty: to } + } } impl ::std::fmt::Display for 
ConstFloat { fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - match *self { - F32(f) => write!(fmt, "{}f32", f), - F64(f) => write!(fmt, "{}f64", f), + match self.ty { + ast::FloatTy::F32 => write!(fmt, "{:#}", Single::from_bits(self.bits))?, + ast::FloatTy::F64 => write!(fmt, "{:#}", Double::from_bits(self.bits))?, } + write!(fmt, "{}", self.ty) + } +} + +impl ::std::fmt::Debug for ConstFloat { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + ::std::fmt::Display::fmt(self, fmt) } } @@ -113,11 +175,20 @@ macro_rules! derive_binop { impl ::std::ops::$op for ConstFloat { type Output = Result; fn $func(self, rhs: Self) -> Result { - match (self, rhs) { - (F32(a), F32(b)) => Ok(F32(a.$func(b))), - (F64(a), F64(b)) => Ok(F64(a.$func(b))), - _ => Err(UnequalTypes(Op::$op)), - } + let bits = match (self.ty, rhs.ty) { + (ast::FloatTy::F32, ast::FloatTy::F32) =>{ + let a = Single::from_bits(self.bits); + let b = Single::from_bits(rhs.bits); + a.$func(b).value.to_bits() + } + (ast::FloatTy::F64, ast::FloatTy::F64) => { + let a = Double::from_bits(self.bits); + let b = Double::from_bits(rhs.bits); + a.$func(b).value.to_bits() + } + _ => return Err(UnequalTypes(Op::$op)), + }; + Ok(ConstFloat { bits, ty: self.ty }) } } } @@ -132,9 +203,10 @@ derive_binop!(Rem, rem); impl ::std::ops::Neg for ConstFloat { type Output = Self; fn neg(self) -> Self { - match self { - F32(f) => F32(-f), - F64(f) => F64(-f), - } + let bits = match self.ty { + ast::FloatTy::F32 => (-Single::from_bits(self.bits)).to_bits(), + ast::FloatTy::F64 => (-Double::from_bits(self.bits)).to_bits(), + }; + ConstFloat { bits, ty: self.ty } } } diff --git a/src/librustc_const_math/int.rs b/src/librustc_const_math/int.rs index d97276da9bf3..65471416e800 100644 --- a/src/librustc_const_math/int.rs +++ b/src/librustc_const_math/int.rs @@ -211,48 +211,6 @@ impl ConstInt { } } - pub fn to_f32(self) -> f32 { - match self { - I8(i) => i as f32, - 
I16(i) => i as f32, - I32(i) => i as f32, - I64(i) => i as f32, - I128(i) => i as f32, - Isize(Is16(i)) => i as f32, - Isize(Is32(i)) => i as f32, - Isize(Is64(i)) => i as f32, - U8(i) => i as f32, - U16(i) => i as f32, - U32(i) => i as f32, - U64(i) => i as f32, - U128(i) => i as f32, - Usize(Us16(i)) => i as f32, - Usize(Us32(i)) => i as f32, - Usize(Us64(i)) => i as f32, - } - } - - pub fn to_f64(self) -> f64 { - match self { - I8(i) => i as f64, - I16(i) => i as f64, - I32(i) => i as f64, - I64(i) => i as f64, - I128(i) => i as f64, - Isize(Is16(i)) => i as f64, - Isize(Is32(i)) => i as f64, - Isize(Is64(i)) => i as f64, - U8(i) => i as f64, - U16(i) => i as f64, - U32(i) => i as f64, - U64(i) => i as f64, - U128(i) => i as f64, - Usize(Us16(i)) => i as f64, - Usize(Us32(i)) => i as f64, - Usize(Us64(i)) => i as f64, - } - } - pub fn is_negative(&self) -> bool { match *self { I8(v) => v < 0, diff --git a/src/librustc_const_math/lib.rs b/src/librustc_const_math/lib.rs index 0dce0e1fb026..3947edecb5af 100644 --- a/src/librustc_const_math/lib.rs +++ b/src/librustc_const_math/lib.rs @@ -26,6 +26,8 @@ #![feature(i128)] #![feature(i128_type)] +extern crate rustc_apfloat; + extern crate syntax; extern crate serialize as rustc_serialize; // used by deriving diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 393fa9c0c8e0..7ece5a42ca19 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef}; use rustc::middle::const_val::{ConstEvalErr, ConstVal, ErrKind}; use rustc_const_math::ConstInt::*; -use rustc_const_math::ConstFloat::*; +use rustc_const_math::ConstFloat; use rustc_const_math::{ConstInt, ConstMathErr}; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; @@ -95,8 +95,13 @@ impl<'tcx> Const<'tcx> { -> Const<'tcx> { let llty = type_of::type_of(ccx, ty); let val = match cv { - ConstVal::Float(F32(v)) => C_floating_f64(v as f64, llty), - 
ConstVal::Float(F64(v)) => C_floating_f64(v, llty), + ConstVal::Float(v) => { + let v_f64 = match v { + ConstFloat::F32(v) => f32::from_bits(v) as f64, + ConstFloat::F64(v) => f64::from_bits(v) + }; + C_floating_f64(v_f64, llty) + } ConstVal::Bool(v) => C_bool(ccx, v), ConstVal::Integral(ref i) => return Const::from_constint(ccx, i), ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), diff --git a/src/test/mir-opt/deaggregator_test.rs b/src/test/mir-opt/deaggregator_test.rs index f136d74fa517..81dd1932894f 100644 --- a/src/test/mir-opt/deaggregator_test.rs +++ b/src/test/mir-opt/deaggregator_test.rs @@ -25,7 +25,7 @@ fn main() {} // bb0: { // _2 = _1; // _3 = _2; -// _0 = Baz { x: _3, y: const F32(0), z: const false }; +// _0 = Baz { x: _3, y: const 0f32, z: const false }; // return; // } // END rustc.node13.Deaggregator.before.mir @@ -34,7 +34,7 @@ fn main() {} // _2 = _1; // _3 = _2; // (_0.0: usize) = _3; -// (_0.1: f32) = const F32(0); +// (_0.1: f32) = const 0f32; // (_0.2: bool) = const false; // return; // } From c457b26e33ccd83da9055acb19f6099e4a353a12 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Sun, 30 Jul 2017 20:43:53 +0300 Subject: [PATCH 161/213] rustc_trans: do not pass floating-point values to LLVM through FFI. 
--- src/librustc_llvm/ffi.rs | 1 - src/librustc_trans/base.rs | 3 +-- src/librustc_trans/common.rs | 6 ------ src/librustc_trans/consts.rs | 6 ++++++ src/librustc_trans/mir/constant.rs | 12 ++++++------ 5 files changed, 13 insertions(+), 15 deletions(-) diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index 24d4040ccb08..20735af69e37 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -598,7 +598,6 @@ extern "C" { // Operations on scalar constants pub fn LLVMConstInt(IntTy: TypeRef, N: c_ulonglong, SignExtend: Bool) -> ValueRef; pub fn LLVMConstIntOfArbitraryPrecision(IntTy: TypeRef, Wn: c_uint, Ws: *const u64) -> ValueRef; - pub fn LLVMConstReal(RealTy: TypeRef, N: f64) -> ValueRef; pub fn LLVMConstIntGetZExtValue(ConstantVal: ValueRef) -> c_ulonglong; pub fn LLVMConstIntGetSExtValue(ConstantVal: ValueRef) -> c_longlong; pub fn LLVMRustConstInt128Get(ConstantVal: ValueRef, SExt: bool, diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 14c73de64bc7..086a8290cff7 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -844,8 +844,7 @@ fn create_imps(sess: &Session, let imp = llvm::LLVMAddGlobal(llvm_module.llmod, i8p_ty.to_ref(), imp_name.as_ptr() as *const _); - let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref()); - llvm::LLVMSetInitializer(imp, init); + llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty)); llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 9b0803908b16..61766a3db2c9 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -223,12 +223,6 @@ pub fn C_big_integral(t: Type, u: u128) -> ValueRef { } } -pub fn C_floating_f64(f: f64, t: Type) -> ValueRef { - unsafe { - llvm::LLVMConstReal(t.to_ref(), f) - } -} - pub fn C_nil(ccx: &CrateContext) -> ValueRef { C_struct(ccx, &[], false) } diff --git a/src/librustc_trans/consts.rs 
b/src/librustc_trans/consts.rs index da2a58398634..310cd6fe9559 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -36,6 +36,12 @@ pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef { } } +pub fn bitcast(val: ValueRef, ty: Type) -> ValueRef { + unsafe { + llvm::LLVMConstBitCast(val, ty.to_ref()) + } +} + pub fn addr_of_mut(ccx: &CrateContext, cv: ValueRef, align: machine::llalign, diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 7ece5a42ca19..265600a35e76 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -11,7 +11,6 @@ use llvm::{self, ValueRef}; use rustc::middle::const_val::{ConstEvalErr, ConstVal, ErrKind}; use rustc_const_math::ConstInt::*; -use rustc_const_math::ConstFloat; use rustc_const_math::{ConstInt, ConstMathErr}; use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; @@ -27,7 +26,7 @@ use abi::{self, Abi}; use callee; use builder::Builder; use common::{self, CrateContext, const_get_elt, val_ty}; -use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral}; +use common::{C_array, C_bool, C_bytes, C_integral, C_big_integral, C_u32, C_u64}; use common::{C_null, C_struct, C_str_slice, C_undef, C_uint, C_vector, is_undef}; use common::const_to_opt_u128; use consts; @@ -37,6 +36,7 @@ use type_::Type; use value::Value; use syntax_pos::Span; +use syntax::ast; use std::fmt; use std::ptr; @@ -96,11 +96,11 @@ impl<'tcx> Const<'tcx> { let llty = type_of::type_of(ccx, ty); let val = match cv { ConstVal::Float(v) => { - let v_f64 = match v { - ConstFloat::F32(v) => f32::from_bits(v) as f64, - ConstFloat::F64(v) => f64::from_bits(v) + let bits = match v.ty { + ast::FloatTy::F32 => C_u32(ccx, v.bits as u32), + ast::FloatTy::F64 => C_u64(ccx, v.bits as u64) }; - C_floating_f64(v_f64, llty) + consts::bitcast(bits, llty) } ConstVal::Bool(v) => C_bool(ccx, v), ConstVal::Integral(ref i) => return Const::from_constint(ccx, i), 
From 4792d28ef1ce958698d20d7c20baa78c83c3140e Mon Sep 17 00:00:00 2001 From: Kornel Date: Wed, 2 Aug 2017 15:40:08 +0100 Subject: [PATCH 162/213] Emphasise that these functions look at the disk, not just the path --- src/libstd/path.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libstd/path.rs b/src/libstd/path.rs index e083ab0ef976..c90a0c785273 100644 --- a/src/libstd/path.rs +++ b/src/libstd/path.rs @@ -2237,7 +2237,7 @@ impl Path { fs::metadata(self).is_ok() } - /// Returns whether the path is pointing at a regular file. + /// Returns whether the path exists on disk and is pointing at a regular file. /// /// This function will traverse symbolic links to query information about the /// destination file. In case of broken symbolic links this will return `false`. @@ -2266,7 +2266,7 @@ impl Path { fs::metadata(self).map(|m| m.is_file()).unwrap_or(false) } - /// Returns whether the path is pointing at a directory. + /// Returns whether the path exists on disk and is pointing at a directory. /// /// This function will traverse symbolic links to query information about the /// destination file. In case of broken symbolic links this will return `false`. From e412cb30dc1a833e2eb1b3601ed934a89c07fe08 Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Wed, 2 Aug 2017 09:58:16 -0700 Subject: [PATCH 163/213] Fix some unaligned reads on SPARC in LTO This fixes #43593 by eliminating some undefined behavior. 
--- src/librustc_trans/back/lto.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/librustc_trans/back/lto.rs b/src/librustc_trans/back/lto.rs index e160d6b6c6ab..3e2d9f5c32e8 100644 --- a/src/librustc_trans/back/lto.rs +++ b/src/librustc_trans/back/lto.rs @@ -27,6 +27,7 @@ use flate2::read::DeflateDecoder; use std::io::Read; use std::ffi::CString; use std::path::Path; +use std::ptr::read_unaligned; pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { match crate_type { @@ -223,13 +224,13 @@ fn is_versioned_bytecode_format(bc: &[u8]) -> bool { fn extract_bytecode_format_version(bc: &[u8]) -> u32 { let pos = link::RLIB_BYTECODE_OBJECT_VERSION_OFFSET; let byte_data = &bc[pos..pos + 4]; - let data = unsafe { *(byte_data.as_ptr() as *const u32) }; + let data = unsafe { read_unaligned(byte_data.as_ptr() as *const u32) }; u32::from_le(data) } fn extract_compressed_bytecode_size_v1(bc: &[u8]) -> u64 { let pos = link::RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET; let byte_data = &bc[pos..pos + 8]; - let data = unsafe { *(byte_data.as_ptr() as *const u64) }; + let data = unsafe { read_unaligned(byte_data.as_ptr() as *const u64) }; u64::from_le(data) } From daa764461d57ab69ef9fa9dcbf2a73c4fb0e6f94 Mon Sep 17 00:00:00 2001 From: Matt Brubeck Date: Wed, 2 Aug 2017 10:42:12 -0700 Subject: [PATCH 164/213] Remove unused fnv hash code --- src/librustc_data_structures/fnv.rs | 66 ----------------------------- src/librustc_data_structures/lib.rs | 1 - 2 files changed, 67 deletions(-) delete mode 100644 src/librustc_data_structures/fnv.rs diff --git a/src/librustc_data_structures/fnv.rs b/src/librustc_data_structures/fnv.rs deleted file mode 100644 index 5bd57236e7c2..000000000000 --- a/src/librustc_data_structures/fnv.rs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::collections::{HashMap, HashSet}; -use std::default::Default; -use std::hash::{Hasher, Hash, BuildHasherDefault}; - -pub type FnvHashMap = HashMap>; -pub type FnvHashSet = HashSet>; - -#[allow(non_snake_case)] -pub fn FnvHashMap() -> FnvHashMap { - HashMap::default() -} - -#[allow(non_snake_case)] -pub fn FnvHashSet() -> FnvHashSet { - HashSet::default() -} - -/// A speedy hash algorithm for node ids and def ids. The hashmap in -/// liballoc by default uses SipHash which isn't quite as speedy as we -/// want. In the compiler we're not really worried about DOS attempts, so we -/// just default to a non-cryptographic hash. -/// -/// This uses FNV hashing, as described here: -/// http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function -pub struct FnvHasher(u64); - -impl Default for FnvHasher { - /// Creates a `FnvHasher`, with a 64-bit hex initial value. 
- #[inline] - fn default() -> FnvHasher { - FnvHasher(0xcbf29ce484222325) - } -} - -impl Hasher for FnvHasher { - #[inline] - fn write(&mut self, bytes: &[u8]) { - let FnvHasher(mut hash) = *self; - for byte in bytes { - hash = hash ^ (*byte as u64); - hash = hash.wrapping_mul(0x100000001b3); - } - *self = FnvHasher(hash); - } - - #[inline] - fn finish(&self) -> u64 { - self.0 - } -} - -pub fn hash(v: &T) -> u64 { - let mut state = FnvHasher::default(); - v.hash(&mut state); - state.finish() -} diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index bb27d479a414..3cb3e088364d 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -65,7 +65,6 @@ pub mod snapshot_vec; pub mod stable_hasher; pub mod transitive_relation; pub mod unify; -pub mod fnv; pub mod fx; pub mod tuple_slice; pub mod veccell; From 1072891b7650f4c4d687ef8032d29638ef5b8f85 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Wed, 2 Aug 2017 23:00:35 +0200 Subject: [PATCH 165/213] Improve color theme for color blind issues --- src/librustdoc/html/static/rustdoc.css | 6 ------ src/librustdoc/html/static/styles/main.css | 8 +++++++- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/librustdoc/html/static/rustdoc.css b/src/librustdoc/html/static/rustdoc.css index 9d237bba1bcd..e100b0e1b433 100644 --- a/src/librustdoc/html/static/rustdoc.css +++ b/src/librustdoc/html/static/rustdoc.css @@ -434,12 +434,6 @@ a { text-decoration: underline; } -.content span.enum, .content a.enum, .block a.current.enum { color: #5e9766; } -.content span.struct, .content a.struct, .block a.current.struct { color: #df3600; } -.content span.type, .content a.type, .block a.current.type { color: #e57300; } -.content span.macro, .content a.macro, .block a.current.macro { color: #068000; } -.content span.union, .content a.union, .block a.current.union { color: #c0c74f; } -.content span.constant, .content a.constant, .block 
a.current.constant { color: #c7944f; } .block a.current.crate { font-weight: 500; } .search-input { diff --git a/src/librustdoc/html/static/styles/main.css b/src/librustdoc/html/static/styles/main.css index c03101990885..531411428678 100644 --- a/src/librustdoc/html/static/styles/main.css +++ b/src/librustdoc/html/static/styles/main.css @@ -97,7 +97,13 @@ pre { border-bottom-color: #ddd; } -.content span.primitive, .content a.primitive, .block a.current.primitive { color: #39a7bf; } +.content span.enum, .content a.enum, .block a.current.enum { color: #508157; } +.content span.struct, .content a.struct, .block a.current.struct { color: #df3600; } +.content span.type, .content a.type, .block a.current.type { color: #ba5d00; } +.content span.macro, .content a.macro, .block a.current.macro { color: #068000; } +.content span.union, .content a.union, .block a.current.union { color: #767b27; } +.content span.constant, .content a.constant, .block a.current.constant { color: #9a6e31; } +.content span.primitive, .content a.primitive, .block a.current.primitive { color: #2c8093; } .content span.externcrate, .content span.mod, .content a.mod, .block a.current.mod { color: #4d76ae; } .content span.fn, .content a.fn, .block a.current.fn, From 971ef720248564c6a8f5ffde90398f931ed210d5 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Wed, 2 Aug 2017 23:09:41 +0200 Subject: [PATCH 166/213] Improve functions --- src/librustdoc/html/static/styles/main.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustdoc/html/static/styles/main.css b/src/librustdoc/html/static/styles/main.css index 531411428678..2989d170b075 100644 --- a/src/librustdoc/html/static/styles/main.css +++ b/src/librustdoc/html/static/styles/main.css @@ -109,7 +109,7 @@ pre { .content span.fn, .content a.fn, .block a.current.fn, .content span.method, .content a.method, .block a.current.method, .content span.tymethod, .content a.tymethod, .block a.current.tymethod, -.content .fnname { 
color: #8c6067; } +.content .fnname { color: #546e8a; } pre.rust .comment { color: #8E908C; } pre.rust .doccomment { color: #4D4D4C; } From a8858eba98bcba47a0bfeed9860c496ebcebf9c2 Mon Sep 17 00:00:00 2001 From: Simon Sapin Date: Thu, 3 Aug 2017 00:36:43 +0200 Subject: [PATCH 167/213] Update nomicon --- src/doc/nomicon | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/doc/nomicon b/src/doc/nomicon index f8fd6710399a..f570bcb68177 160000 --- a/src/doc/nomicon +++ b/src/doc/nomicon @@ -1 +1 @@ -Subproject commit f8fd6710399a1a557155cb5be4922fe6a6f694c0 +Subproject commit f570bcb681771d691aa4fdb8dfcfad1939844bf5 From 795db4c946a5093877b50a59f0575e58103fec0f Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Wed, 2 Aug 2017 20:57:47 -0400 Subject: [PATCH 168/213] Fix broken links in `Thread` docs. --- src/libstd/thread/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs index 2ae62f8c3e0a..c1d9a0c9e6b1 100644 --- a/src/libstd/thread/mod.rs +++ b/src/libstd/thread/mod.rs @@ -896,6 +896,9 @@ struct Inner { /// docs of [`Builder`] and [`spawn`] for more details. /// /// [`Builder`]: ../../std/thread/struct.Builder.html +/// [`JoinHandle::thread`]: ../../std/thread/struct.JoinHandle.html#method.thread +/// [`JoinHandle`]: ../../std/thread/struct.JoinHandle.html +/// [`thread::current`]: ../../std/thread/fn.current.html /// [`spawn`]: ../../std/thread/fn.spawn.html pub struct Thread { From 4c08c131fa6ef63809f6a4eb25ae1289dd381259 Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Wed, 2 Aug 2017 20:59:33 -0400 Subject: [PATCH 169/213] Indicate how `ThreadId` is created. 
--- src/libstd/thread/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs index c1d9a0c9e6b1..cbd019c2c0e6 100644 --- a/src/libstd/thread/mod.rs +++ b/src/libstd/thread/mod.rs @@ -820,7 +820,8 @@ pub fn park_timeout(dur: Duration) { /// /// A `ThreadId` is an opaque object that has a unique value for each thread /// that creates one. `ThreadId`s are not guaranteed to correspond to a thread's -/// system-designated identifier. +/// system-designated identifier. A `ThreadId` can be retrieved from the [`id`] +/// method on a [`Thread`]. /// /// # Examples /// @@ -834,6 +835,9 @@ pub fn park_timeout(dur: Duration) { /// let other_thread_id = other_thread.join().unwrap(); /// assert!(thread::current().id() != other_thread_id); /// ``` +/// +/// [`id`]: ../../std/thread/struct.Thread.html#method.id +/// [`Thread`]: ../../std/thread/struct.Thread.html #[stable(feature = "thread_id", since = "1.19.0")] #[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)] pub struct ThreadId(u64); From 62f179b36e6d890a56837d52b992bb7ad220ad27 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Wed, 2 Aug 2017 21:12:36 -0700 Subject: [PATCH 170/213] extend config.toml doc --- src/bootstrap/config.toml.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bootstrap/config.toml.example b/src/bootstrap/config.toml.example index bf4786ba552a..9314135050f0 100644 --- a/src/bootstrap/config.toml.example +++ b/src/bootstrap/config.toml.example @@ -203,7 +203,7 @@ #codegen-units = 1 # Whether or not debug assertions are enabled for the compiler and standard -# library +# library. Also enables compilation of debug! and trace! logging macros. 
#debug-assertions = false # Whether or not debuginfo is emitted From 2683ba631b41aae266dfc2a8661ca9442db96a2d Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Thu, 3 Aug 2017 10:20:01 +1200 Subject: [PATCH 171/213] Appease tidy and fix save-analysis config for dist builds --- src/bootstrap/bin/rustc.rs | 2 +- src/librustc_save_analysis/dump_visitor.rs | 23 +++++++++++----------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index ac2e3bc402ac..f6ed4ee91b3c 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -188,7 +188,7 @@ fn main() { cmd.arg("-Zsave-analysis"); cmd.env("RUST_SAVE_ANALYSIS_CONFIG", "{\"output_file\": null,\"full_docs\": false,\"pub_only\": true,\ - \"signatures\": false,\"borrow_data\": false}"); + \"distro_crate\": true,\"signatures\": false,\"borrow_data\": false}"); } // Dealing with rpath here is a little special, so let's go into some diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs index 4cfd570adead..4740f9a0d5a5 100644 --- a/src/librustc_save_analysis/dump_visitor.rs +++ b/src/librustc_save_analysis/dump_visitor.rs @@ -561,17 +561,18 @@ impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> { if let ast::ItemKind::Struct(ast::VariantData::Struct(ref fields, _), _) = item.node { let include_priv_fields = !self.save_ctxt.config.pub_only; - let fields_str = fields.iter() - .enumerate() - .filter_map(|(i, f)| { - if include_priv_fields || f.vis == ast::Visibility::Public { - f.ident.map(|i| i.to_string()).or_else(|| Some(i.to_string())) - } else { - None - } - }) - .collect::>() - .join(", "); + let fields_str = fields + .iter() + .enumerate() + .filter_map(|(i, f)| { + if include_priv_fields || f.vis == ast::Visibility::Public { + f.ident.map(|i| i.to_string()).or_else(|| Some(i.to_string())) + } else { + None + } + }) + .collect::>() + .join(", "); let value = format!("{} 
{{ {} }}", name, fields_str); (value, fields.iter().map(|f| ::id_from_node_id(f.id, &self.save_ctxt)).collect()) } else { From a704511f9d10804bea55e0d4d7eeb4cab6c413fc Mon Sep 17 00:00:00 2001 From: Daan Sprenkels Date: Thu, 3 Aug 2017 04:14:16 +0200 Subject: [PATCH 172/213] Add more regression tests for #40510 --- .../{issue-40510.rs => issue-40510-1.rs} | 0 src/test/compile-fail/issue-40510-2.rs | 23 +++++++++++++++++ src/test/compile-fail/issue-40510-3.rs | 25 +++++++++++++++++++ src/test/compile-fail/issue-40510-4.rs | 25 +++++++++++++++++++ 4 files changed, 73 insertions(+) rename src/test/compile-fail/{issue-40510.rs => issue-40510-1.rs} (100%) create mode 100644 src/test/compile-fail/issue-40510-2.rs create mode 100644 src/test/compile-fail/issue-40510-3.rs create mode 100644 src/test/compile-fail/issue-40510-4.rs diff --git a/src/test/compile-fail/issue-40510.rs b/src/test/compile-fail/issue-40510-1.rs similarity index 100% rename from src/test/compile-fail/issue-40510.rs rename to src/test/compile-fail/issue-40510-1.rs diff --git a/src/test/compile-fail/issue-40510-2.rs b/src/test/compile-fail/issue-40510-2.rs new file mode 100644 index 000000000000..0fe565848226 --- /dev/null +++ b/src/test/compile-fail/issue-40510-2.rs @@ -0,0 +1,23 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(rustc_attrs)] +#![allow(unused)] + +fn f() { + let x: Box<()> = Box::new(()); + + || { + &x + }; +} + +#[rustc_error] +fn main() {} //~ ERROR compilation successful diff --git a/src/test/compile-fail/issue-40510-3.rs b/src/test/compile-fail/issue-40510-3.rs new file mode 100644 index 000000000000..afa8f15ee570 --- /dev/null +++ b/src/test/compile-fail/issue-40510-3.rs @@ -0,0 +1,25 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(rustc_attrs)] +#![allow(unused)] + +fn f() { + let mut x: Vec<()> = Vec::new(); + + || { + || { + x.push(()) + } + }; +} + +#[rustc_error] +fn main() {} //~ ERROR compilation successful diff --git a/src/test/compile-fail/issue-40510-4.rs b/src/test/compile-fail/issue-40510-4.rs new file mode 100644 index 000000000000..a39c500225b9 --- /dev/null +++ b/src/test/compile-fail/issue-40510-4.rs @@ -0,0 +1,25 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(rustc_attrs)] +#![allow(unused)] + +fn f() { + let x: Vec<()> = Vec::new(); + + || { + || { + x.len() + } + }; +} + +#[rustc_error] +fn main() {} //~ ERROR compilation successful From 26d124ca0eb6fe2fce5aa0541b269be27473d0b9 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Thu, 3 Aug 2017 12:46:35 +0200 Subject: [PATCH 173/213] Invert constant and function color --- src/librustdoc/html/static/styles/main.css | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librustdoc/html/static/styles/main.css b/src/librustdoc/html/static/styles/main.css index 2989d170b075..63e7510b22ec 100644 --- a/src/librustdoc/html/static/styles/main.css +++ b/src/librustdoc/html/static/styles/main.css @@ -102,14 +102,14 @@ pre { .content span.type, .content a.type, .block a.current.type { color: #ba5d00; } .content span.macro, .content a.macro, .block a.current.macro { color: #068000; } .content span.union, .content a.union, .block a.current.union { color: #767b27; } -.content span.constant, .content a.constant, .block a.current.constant { color: #9a6e31; } +.content span.constant, .content a.constant, .block a.current.constant { color: #546e8a; } .content span.primitive, .content a.primitive, .block a.current.primitive { color: #2c8093; } .content span.externcrate, .content span.mod, .content a.mod, .block a.current.mod { color: #4d76ae; } .content span.fn, .content a.fn, .block a.current.fn, .content span.method, .content a.method, .block a.current.method, .content span.tymethod, .content a.tymethod, .block a.current.tymethod, -.content .fnname { color: #546e8a; } +.content .fnname { color: #9a6e31; } pre.rust .comment { color: #8E908C; } pre.rust .doccomment { color: #4D4D4C; } From ac919d527ced593e1b199b8c5b7b8cafa3840375 Mon Sep 17 00:00:00 2001 From: scalexm Date: Tue, 1 Aug 2017 21:38:52 +0200 Subject: [PATCH 174/213] Add a more precise error message When trying to perform static dispatch on something which derefs to a trait object, and the 
target trait is not in scope, we had confusing error messages if the target method had a `Self: Sized` bound. We add a more precise error message in this case: "consider using trait ...". Fixes #35976. --- src/librustc/ty/mod.rs | 2 +- src/librustc_typeck/check/method/confirm.rs | 45 ++++++++++++- src/librustc_typeck/check/method/mod.rs | 70 +++++++++++++++++---- src/librustc_typeck/check/method/probe.rs | 30 +++++++-- src/test/compile-fail/issue-35976.rs | 33 ++++++++++ 5 files changed, 157 insertions(+), 23 deletions(-) create mode 100644 src/test/compile-fail/issue-35976.rs diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index f245b1503dab..eef0bcc37535 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -160,7 +160,7 @@ pub struct ImplHeader<'tcx> { pub predicates: Vec>, } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct AssociatedItem { pub def_id: DefId, pub name: Name, diff --git a/src/librustc_typeck/check/method/confirm.rs b/src/librustc_typeck/check/method/confirm.rs index ad4ee5a9d6dc..0ab75ad622ce 100644 --- a/src/librustc_typeck/check/method/confirm.rs +++ b/src/librustc_typeck/check/method/confirm.rs @@ -38,6 +38,11 @@ impl<'a, 'gcx, 'tcx> Deref for ConfirmContext<'a, 'gcx, 'tcx> { } } +pub struct ConfirmResult<'tcx> { + pub callee: MethodCallee<'tcx>, + pub rerun: bool, +} + impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn confirm_method(&self, span: Span, @@ -46,7 +51,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { unadjusted_self_ty: Ty<'tcx>, pick: probe::Pick<'tcx>, segment: &hir::PathSegment) - -> MethodCallee<'tcx> { + -> ConfirmResult<'tcx> { debug!("confirm(unadjusted_self_ty={:?}, pick={:?}, generic_args={:?})", unadjusted_self_ty, pick, @@ -75,7 +80,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { unadjusted_self_ty: Ty<'tcx>, pick: probe::Pick<'tcx>, segment: &hir::PathSegment) - -> MethodCallee<'tcx> { + -> ConfirmResult<'tcx> { // Adjust the self 
expression the user provided and obtain the adjusted type. let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick); @@ -91,6 +96,16 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // Create the final signature for the method, replacing late-bound regions. let (method_sig, method_predicates) = self.instantiate_method_sig(&pick, all_substs); + // If there is a `Self: Sized` bound and `Self` is a trait object, it is possible that + // something which derefs to `Self` actually implements the trait and the caller + // wanted to make a static dispatch on it but forgot to import the trait. + // See test `src/test/compile-fail/issue-35976.rs`. + // + // In that case, we'll error anyway, but we'll also re-run the search with all traits + // in scope, and if we find another method which can be used, we'll output an + // appropriate hint suggesting to import the trait. + let rerun = self.predicates_require_illegal_sized_bound(&method_predicates); + // Unify the (adjusted) self type with what the method expects. 
self.unify_receivers(self_ty, method_sig.inputs()[0]); @@ -109,7 +124,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { self.convert_lvalue_derefs_to_mutable(); } - callee + ConfirmResult { callee, rerun } } /////////////////////////////////////////////////////////////////////////// @@ -533,6 +548,30 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { /////////////////////////////////////////////////////////////////////////// // MISCELLANY + fn predicates_require_illegal_sized_bound(&self, + predicates: &ty::InstantiatedPredicates<'tcx>) + -> bool { + let sized_def_id = match self.tcx.lang_items.sized_trait() { + Some(def_id) => def_id, + None => return false, + }; + + traits::elaborate_predicates(self.tcx, predicates.predicates.clone()) + .filter_map(|predicate| { + match predicate { + ty::Predicate::Trait(trait_pred) if trait_pred.def_id() == sized_def_id => + Some(trait_pred), + _ => None, + } + }) + .any(|trait_pred| { + match trait_pred.0.self_ty().sty { + ty::TyDynamic(..) => true, + _ => false, + } + }) + } + fn enforce_illegal_method_limitations(&self, pick: &probe::Pick) { // Disallow calls to the method `drop` defined in the `Drop` trait. match pick.item.container { diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index c842e47aaf51..f875511a9f59 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -33,7 +33,7 @@ mod confirm; pub mod probe; mod suggest; -use self::probe::IsSuggestion; +use self::probe::{IsSuggestion, ProbeScope}; #[derive(Clone, Copy, Debug)] pub struct MethodCallee<'tcx> { @@ -106,7 +106,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { -> bool { let mode = probe::Mode::MethodCall; match self.probe_for_name(span, mode, method_name, IsSuggestion(false), - self_ty, call_expr_id) { + self_ty, call_expr_id, ProbeScope::TraitsInScope) { Ok(..) 
=> true, Err(NoMatch(..)) => false, Err(Ambiguity(..)) => true, @@ -142,10 +142,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { call_expr, self_expr); - let mode = probe::Mode::MethodCall; - let self_ty = self.resolve_type_vars_if_possible(&self_ty); - let pick = self.probe_for_name(span, mode, segment.name, IsSuggestion(false), - self_ty, call_expr.id)?; + let pick = self.lookup_probe( + span, + segment.name, + self_ty, + call_expr, + ProbeScope::TraitsInScope + )?; if let Some(import_id) = pick.import_id { let import_def_id = self.tcx.hir.local_def_id(import_id); @@ -155,12 +158,53 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.tcx.check_stability(pick.item.def_id, call_expr.id, span); - Ok(self.confirm_method(span, - self_expr, - call_expr, - self_ty, - pick, - segment)) + let result = self.confirm_method(span, + self_expr, + call_expr, + self_ty, + pick.clone(), + segment); + + if result.rerun { + // We probe again, taking all traits into account (not only those in scope). + if let Ok(new_pick) = self.lookup_probe(span, + segment.name, + self_ty, + call_expr, + ProbeScope::AllTraits) { + // If we find a different result, the caller probably forgot to import the trait. + // We span an error with an appropriate help message. 
+ if new_pick != pick { + let error = MethodError::NoMatch( + NoMatchData::new(Vec::new(), + Vec::new(), + vec![new_pick.item.container.id()], + probe::Mode::MethodCall) + ); + self.report_method_error(span, + self_ty, + segment.name, + Some(self_expr), + error, + None); + } + } + } + + Ok(result.callee) + } + + fn lookup_probe(&self, + span: Span, + method_name: ast::Name, + self_ty: ty::Ty<'tcx>, + call_expr: &'gcx hir::Expr, + scope: ProbeScope) + -> probe::PickResult<'tcx> { + let mode = probe::Mode::MethodCall; + let self_ty = self.resolve_type_vars_if_possible(&self_ty); + self.probe_for_name(span, mode, method_name, IsSuggestion(false), + self_ty, call_expr.id, scope) } /// `lookup_method_in_trait` is used for overloaded operators. @@ -299,7 +343,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { -> Result> { let mode = probe::Mode::Path; let pick = self.probe_for_name(span, mode, method_name, IsSuggestion(false), - self_ty, expr_id)?; + self_ty, expr_id, ProbeScope::TraitsInScope)?; if let Some(import_id) = pick.import_id { let import_def_id = self.tcx.hir.local_def_id(import_id); diff --git a/src/librustc_typeck/check/method/probe.rs b/src/librustc_typeck/check/method/probe.rs index dfc5cd00b6ea..3195b10404d1 100644 --- a/src/librustc_typeck/check/method/probe.rs +++ b/src/librustc_typeck/check/method/probe.rs @@ -106,7 +106,7 @@ enum CandidateKind<'tcx> { ty::PolyTraitRef<'tcx>), } -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct Pick<'tcx> { pub item: ty::AssociatedItem, pub kind: PickKind<'tcx>, @@ -130,7 +130,7 @@ pub struct Pick<'tcx> { pub unsize: Option>, } -#[derive(Clone,Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub enum PickKind<'tcx> { InherentImplPick, ExtensionImplPick(// Impl @@ -155,6 +155,15 @@ pub enum Mode { Path, } +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum ProbeScope { + // Assemble candidates coming only from traits in scope. + TraitsInScope, + + // Assemble candidates coming from all traits. 
+ AllTraits, +} + impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// This is used to offer suggestions to users. It returns methods /// that could have been called which have the desired return @@ -175,14 +184,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { scope_expr_id); let method_names = self.probe_op(span, mode, LookingFor::ReturnType(return_type), IsSuggestion(true), - self_ty, scope_expr_id, + self_ty, scope_expr_id, ProbeScope::TraitsInScope, |probe_cx| Ok(probe_cx.candidate_method_names())) .unwrap_or(vec![]); method_names .iter() .flat_map(|&method_name| { match self.probe_for_name(span, mode, method_name, IsSuggestion(true), self_ty, - scope_expr_id) { + scope_expr_id, ProbeScope::TraitsInScope) { Ok(pick) => Some(pick.item), Err(_) => None, } @@ -196,7 +205,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { item_name: ast::Name, is_suggestion: IsSuggestion, self_ty: Ty<'tcx>, - scope_expr_id: ast::NodeId) + scope_expr_id: ast::NodeId, + scope: ProbeScope) -> PickResult<'tcx> { debug!("probe(self_ty={:?}, item_name={}, scope_expr_id={})", self_ty, @@ -208,6 +218,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { is_suggestion, self_ty, scope_expr_id, + scope, |probe_cx| probe_cx.pick()) } @@ -218,6 +229,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { is_suggestion: IsSuggestion, self_ty: Ty<'tcx>, scope_expr_id: ast::NodeId, + scope: ProbeScope, op: OP) -> Result> where OP: FnOnce(ProbeContext<'a, 'gcx, 'tcx>) -> Result> @@ -275,8 +287,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let mut probe_cx = ProbeContext::new(self, span, mode, looking_for, steps, opt_simplified_steps); + probe_cx.assemble_inherent_candidates(); - probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?; + match scope { + ProbeScope::TraitsInScope => + probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?, + ProbeScope::AllTraits => + probe_cx.assemble_extension_candidates_for_all_traits()?, + }; op(probe_cx) }) } diff --git 
a/src/test/compile-fail/issue-35976.rs b/src/test/compile-fail/issue-35976.rs new file mode 100644 index 000000000000..194616c94437 --- /dev/null +++ b/src/test/compile-fail/issue-35976.rs @@ -0,0 +1,33 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +mod private { + pub trait Future { + fn wait(&self) where Self: Sized; + } + + impl Future for Box { + fn wait(&self) { } + } +} + +//use private::Future; + +fn bar(arg: Box) { + arg.wait(); + //~^ ERROR no method named `wait` found for type `std::boxed::Box` + //~| the following trait is implemented but not in scope + //~| ERROR the trait bound `private::Future + 'static: std::marker::Sized` is not satisfied + //~| `private::Future + 'static` does not have a constant size known at compile-time +} + +fn main() { + +} From da12c4f8e543cecb3c3841bf4a099522950d3b70 Mon Sep 17 00:00:00 2001 From: scalexm Date: Wed, 2 Aug 2017 12:17:34 +0200 Subject: [PATCH 175/213] Handle ambiguous cases --- src/librustc_typeck/check/method/mod.rs | 55 +++++++++++++++---------- 1 file changed, 34 insertions(+), 21 deletions(-) diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index f875511a9f59..f929a7ef49e1 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -167,27 +167,40 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if result.rerun { // We probe again, taking all traits into account (not only those in scope). - if let Ok(new_pick) = self.lookup_probe(span, - segment.name, - self_ty, - call_expr, - ProbeScope::AllTraits) { - // If we find a different result, the caller probably forgot to import the trait. 
- // We span an error with an appropriate help message. - if new_pick != pick { - let error = MethodError::NoMatch( - NoMatchData::new(Vec::new(), - Vec::new(), - vec![new_pick.item.container.id()], - probe::Mode::MethodCall) - ); - self.report_method_error(span, - self_ty, - segment.name, - Some(self_expr), - error, - None); - } + let candidates = + match self.lookup_probe(span, + segment.name, + self_ty, + call_expr, + ProbeScope::AllTraits) { + Ok(ref new_pick) if *new_pick != pick => vec![new_pick.item.container.id()], + Err(MethodError::Ambiguity(ref sources)) => { + sources.iter() + .filter_map(|source| { + match *source { + // Note: this cannot come from an inherent impl, + // because the first probe succeeded. + ImplSource(def) => self.tcx.trait_id_of_impl(def), + TraitSource(_) => None, + } + }) + .collect() + } + _ => Vec::new(), + }; + + // If we find a different result, the caller probably forgot to import a trait. + // We span an error with an appropriate help message. + if !candidates.is_empty() { + let error = MethodError::NoMatch( + NoMatchData::new(Vec::new(), Vec::new(), candidates, probe::Mode::MethodCall) + ); + self.report_method_error(span, + self_ty, + segment.name, + Some(self_expr), + error, + None); } } From 2e8e75f50f87dca154342790ba03401037a3c52e Mon Sep 17 00:00:00 2001 From: scalexm Date: Thu, 3 Aug 2017 13:50:06 +0200 Subject: [PATCH 176/213] Tweak error message --- src/librustc_typeck/check/method/confirm.rs | 8 ++- src/librustc_typeck/check/method/mod.rs | 25 +++---- src/librustc_typeck/check/method/suggest.rs | 71 +++++++++++++------- src/test/{compile-fail => ui}/issue-35976.rs | 6 +- src/test/ui/issue-35976.stderr | 11 +++ 5 files changed, 77 insertions(+), 44 deletions(-) rename src/test/{compile-fail => ui}/issue-35976.rs (67%) create mode 100644 src/test/ui/issue-35976.stderr diff --git a/src/librustc_typeck/check/method/confirm.rs b/src/librustc_typeck/check/method/confirm.rs index 0ab75ad622ce..fd148062372f 100644 --- 
a/src/librustc_typeck/check/method/confirm.rs +++ b/src/librustc_typeck/check/method/confirm.rs @@ -110,8 +110,12 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { self.unify_receivers(self_ty, method_sig.inputs()[0]); // Add any trait/regions obligations specified on the method's type parameters. - let method_ty = self.tcx.mk_fn_ptr(ty::Binder(method_sig)); - self.add_obligations(method_ty, all_substs, &method_predicates); + // We won't add these if we encountered an illegal sized bound, so that we can use + // a custom error in that case. + if !rerun { + let method_ty = self.tcx.mk_fn_ptr(ty::Binder(method_sig)); + self.add_obligations(method_ty, all_substs, &method_predicates); + } // Create the final `MethodCallee`. let callee = MethodCallee { diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index f929a7ef49e1..eda17ab02c49 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -60,6 +60,10 @@ pub enum MethodError<'tcx> { // Found an applicable method, but it is not visible. PrivateMatch(Def), + + // Found a `Self: Sized` bound where `Self` is a trait object, also the caller may have + // forgotten to import a trait. + IllegalSizedBound(Vec), } // Contains a list of static methods that may apply, a list of unsatisfied trait predicates which @@ -112,6 +116,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Err(Ambiguity(..)) => true, Err(ClosureAmbiguity(..)) => true, Err(PrivateMatch(..)) => allow_private, + Err(IllegalSizedBound(..)) => true, } } @@ -173,13 +178,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self_ty, call_expr, ProbeScope::AllTraits) { + + // If we find a different result the caller probably forgot to import a trait. 
Ok(ref new_pick) if *new_pick != pick => vec![new_pick.item.container.id()], - Err(MethodError::Ambiguity(ref sources)) => { + Err(Ambiguity(ref sources)) => { sources.iter() .filter_map(|source| { match *source { // Note: this cannot come from an inherent impl, - // because the first probe succeeded. + // because the first probing succeeded. ImplSource(def) => self.tcx.trait_id_of_impl(def), TraitSource(_) => None, } @@ -189,19 +196,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => Vec::new(), }; - // If we find a different result, the caller probably forgot to import a trait. - // We span an error with an appropriate help message. - if !candidates.is_empty() { - let error = MethodError::NoMatch( - NoMatchData::new(Vec::new(), Vec::new(), candidates, probe::Mode::MethodCall) - ); - self.report_method_error(span, - self_ty, - segment.name, - Some(self_expr), - error, - None); - } + return Err(IllegalSizedBound(candidates)); } Ok(result.callee) diff --git a/src/librustc_typeck/check/method/suggest.rs b/src/librustc_typeck/check/method/suggest.rs index 4faf71e0cc94..c480febdec66 100644 --- a/src/librustc_typeck/check/method/suggest.rs +++ b/src/librustc_typeck/check/method/suggest.rs @@ -315,9 +315,44 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let msg = format!("{} `{}` is private", def.kind_name(), item_name); self.tcx.sess.span_err(span, &msg); } + + MethodError::IllegalSizedBound(candidates) => { + let msg = format!("the `{}` method cannot be invoked on a trait object", item_name); + let mut err = self.sess().struct_span_err(span, &msg); + if !candidates.is_empty() { + let help = format!("{an}other candidate{s} {were} found in the following \ + trait{s}, perhaps add a `use` for {one_of_them}:", + an = if candidates.len() == 1 {"an" } else { "" }, + s = if candidates.len() == 1 { "" } else { "s" }, + were = if candidates.len() == 1 { "was" } else { "were" }, + one_of_them = if candidates.len() == 1 { + "it" + } else { + "one_of_them" + }); + 
self.suggest_use_candidates(&mut err, help, candidates); + } + err.emit(); + } } } + fn suggest_use_candidates(&self, + err: &mut DiagnosticBuilder, + mut msg: String, + candidates: Vec) { + let limit = if candidates.len() == 5 { 5 } else { 4 }; + for (i, trait_did) in candidates.iter().take(limit).enumerate() { + msg.push_str(&format!("\ncandidate #{}: `use {};`", + i + 1, + self.tcx.item_path_str(*trait_did))); + } + if candidates.len() > limit { + msg.push_str(&format!("\nand {} others", candidates.len() - limit)); + } + err.note(&msg[..]); + } + fn suggest_traits_to_import(&self, err: &mut DiagnosticBuilder, span: Span, @@ -330,30 +365,20 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { candidates.sort(); candidates.dedup(); err.help("items from traits can only be used if the trait is in scope"); - let mut msg = format!("the following {traits_are} implemented but not in scope, \ - perhaps add a `use` for {one_of_them}:", - traits_are = if candidates.len() == 1 { - "trait is" - } else { - "traits are" - }, - one_of_them = if candidates.len() == 1 { - "it" - } else { - "one of them" - }); - - let limit = if candidates.len() == 5 { 5 } else { 4 }; - for (i, trait_did) in candidates.iter().take(limit).enumerate() { - msg.push_str(&format!("\ncandidate #{}: `use {};`", - i + 1, - self.tcx.item_path_str(*trait_did))); - } - if candidates.len() > limit { - msg.push_str(&format!("\nand {} others", candidates.len() - limit)); - } - err.note(&msg[..]); + let msg = format!("the following {traits_are} implemented but not in scope, \ + perhaps add a `use` for {one_of_them}:", + traits_are = if candidates.len() == 1 { + "trait is" + } else { + "traits are" + }, + one_of_them = if candidates.len() == 1 { + "it" + } else { + "one of them" + }); + self.suggest_use_candidates(err, msg, candidates); return; } diff --git a/src/test/compile-fail/issue-35976.rs b/src/test/ui/issue-35976.rs similarity index 67% rename from src/test/compile-fail/issue-35976.rs rename to 
src/test/ui/issue-35976.rs index 194616c94437..169d7b559167 100644 --- a/src/test/compile-fail/issue-35976.rs +++ b/src/test/ui/issue-35976.rs @@ -22,10 +22,8 @@ mod private { fn bar(arg: Box) { arg.wait(); - //~^ ERROR no method named `wait` found for type `std::boxed::Box` - //~| the following trait is implemented but not in scope - //~| ERROR the trait bound `private::Future + 'static: std::marker::Sized` is not satisfied - //~| `private::Future + 'static` does not have a constant size known at compile-time + //~^ ERROR the `wait` method cannot be invoked on a trait object + //~| another candidate was found in the following trait, perhaps add a `use` for it: } fn main() { diff --git a/src/test/ui/issue-35976.stderr b/src/test/ui/issue-35976.stderr new file mode 100644 index 000000000000..9fb67449734b --- /dev/null +++ b/src/test/ui/issue-35976.stderr @@ -0,0 +1,11 @@ +error: the `wait` method cannot be invoked on a trait object + --> $DIR/issue-35976.rs:24:9 + | +24 | arg.wait(); + | ^^^^ + | + = note: another candidate was found in the following trait, perhaps add a `use` for it: + candidate #1: `use private::Future;` + +error: aborting due to previous error + From b34c5a23ab0996321a414d47729a9076364a79c6 Mon Sep 17 00:00:00 2001 From: Michael Woerister Date: Thu, 3 Aug 2017 14:45:01 +0200 Subject: [PATCH 177/213] incr.comp.: Make ConstEval dep-node anonymous. 
--- src/librustc/dep_graph/dep_node.rs | 16 +++++++------ src/librustc/ty/maps.rs | 37 +++++++++++------------------- 2 files changed, 22 insertions(+), 31 deletions(-) diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index 800689f4638d..c6f9cb2fcead 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -66,7 +66,6 @@ use hir::map::DefPathHash; use ich::Fingerprint; use ty::{TyCtxt, Instance, InstanceDef}; use ty::fast_reject::SimplifiedType; -use ty::subst::Substs; use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; use ich::StableHashingContext; use std::fmt; @@ -104,6 +103,8 @@ macro_rules! define_dep_nodes { match *self { $( DepKind :: $variant => { + $(return !anon_attr_to_bool!($anon);)* + // tuple args $({ return <( $($tuple_arg,)* ) as DepNodeParams> @@ -112,6 +113,7 @@ macro_rules! define_dep_nodes { // struct args $({ + return <( $($struct_arg_ty,)* ) as DepNodeParams> ::CAN_RECONSTRUCT_QUERY_KEY; })* @@ -445,17 +447,17 @@ define_dep_nodes!( <'tcx> [] TypeckBodiesKrate, [] TypeckTables(DefId), [] HasTypeckTables(DefId), - [] ConstEval { def_id: DefId, substs: &'tcx Substs<'tcx> }, + [anon] ConstEval, [] SymbolName(DefId), [] InstanceSymbolName { instance: Instance<'tcx> }, [] SpecializationGraph(DefId), [] ObjectSafety(DefId), - [anon] IsCopy(DefId), - [anon] IsSized(DefId), - [anon] IsFreeze(DefId), - [anon] NeedsDrop(DefId), - [anon] Layout(DefId), + [anon] IsCopy, + [anon] IsSized, + [anon] IsFreeze, + [anon] NeedsDrop, + [anon] Layout, // The set of impls for a given trait. [] TraitImpls(DefId), diff --git a/src/librustc/ty/maps.rs b/src/librustc/ty/maps.rs index d62d8f986c23..a2e335c00b26 100644 --- a/src/librustc/ty/maps.rs +++ b/src/librustc/ty/maps.rs @@ -9,7 +9,7 @@ // except according to those terms. 
use dep_graph::{DepConstructor, DepNode, DepNodeIndex}; -use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE}; +use hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use hir::def::Def; use hir; use middle::const_val; @@ -1036,10 +1036,9 @@ fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { DepConstructor::TypeckBodiesKrate } -fn const_eval_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) +fn const_eval_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) -> DepConstructor<'tcx> { - let (def_id, substs) = key.value; - DepConstructor::ConstEval { def_id, substs } + DepConstructor::ConstEval } fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { @@ -1054,32 +1053,22 @@ fn relevant_trait_impls_for<'tcx>((def_id, t): (DefId, SimplifiedType)) -> DepCo DepConstructor::RelevantTraitImpls(def_id, t) } -fn is_copy_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::IsCopy(def_id) +fn is_copy_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsCopy } -fn is_sized_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::IsSized(def_id) +fn is_sized_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsSized } -fn is_freeze_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::IsFreeze(def_id) +fn is_freeze_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsFreeze } -fn needs_drop_dep_node<'tcx>(key: 
ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::NeedsDrop(def_id) +fn needs_drop_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::NeedsDrop } -fn layout_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - let def_id = ty::item_path::characteristic_def_id_of_type(key.value) - .unwrap_or(DefId::local(CRATE_DEF_INDEX)); - DepConstructor::Layout(def_id) +fn layout_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::Layout } From eac01f123d20f4a606496f3f0d1511edffaf7b88 Mon Sep 17 00:00:00 2001 From: Ian Douglas Scott Date: Fri, 28 Jul 2017 16:34:16 -0700 Subject: [PATCH 178/213] Implement AsRawHandle for Std* on Windows --- src/libstd/sys/windows/ext/io.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/libstd/sys/windows/ext/io.rs b/src/libstd/sys/windows/ext/io.rs index 2ddb6c65fd35..829e44cebf77 100644 --- a/src/libstd/sys/windows/ext/io.rs +++ b/src/libstd/sys/windows/ext/io.rs @@ -15,6 +15,7 @@ use os::windows::raw; use net; use sys_common::{self, AsInner, FromInner, IntoInner}; use sys; +use io; use sys::c; /// Raw HANDLEs. 
@@ -71,6 +72,27 @@ impl AsRawHandle for fs::File { } } +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawHandle for io::Stdin { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) } as RawHandle + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawHandle for io::Stdout { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) } as RawHandle + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawHandle for io::Stderr { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) } as RawHandle + } +} + #[stable(feature = "from_raw_os", since = "1.1.0")] impl FromRawHandle for fs::File { unsafe fn from_raw_handle(handle: RawHandle) -> fs::File { From 66317a39a651560bbd77fa3cac984e7e0ac580c0 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Thu, 3 Aug 2017 22:55:17 +0200 Subject: [PATCH 179/213] Update highlight colors --- src/librustdoc/html/static/styles/main.css | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/librustdoc/html/static/styles/main.css b/src/librustdoc/html/static/styles/main.css index 63e7510b22ec..16962811ea02 100644 --- a/src/librustdoc/html/static/styles/main.css +++ b/src/librustdoc/html/static/styles/main.css @@ -74,8 +74,9 @@ pre { .content .highlighted.mod { background-color: #afc6e4; } .content .highlighted.enum { background-color: #b4d1b9; } .content .highlighted.struct { background-color: #e7b1a0; } -.content .highlighted.fn { background-color: #c6afb3; } -.content .highlighted.method { background-color: #c6afb3; } +.content .highlighted.union { background-color: #b7bd49; } +.content .highlighted.fn, +.content .highlighted.method, .content .highlighted.tymethod { background-color: #c6afb3; } .content .highlighted.type { background-color: #c6afb3; } From 9427bb36f62cbafddfff60ddcaf3494d7fb96f66 Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Thu, 3 Aug 2017 
14:55:01 -0700 Subject: [PATCH 180/213] Fix a dangling symlink bug in `remove_dir_all()` on Solaris This fixes a handful of long-failing tests. --- src/libstd/sys/unix/fs.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/libstd/sys/unix/fs.rs b/src/libstd/sys/unix/fs.rs index 59dceba89532..4e6fde5c29de 100644 --- a/src/libstd/sys/unix/fs.rs +++ b/src/libstd/sys/unix/fs.rs @@ -284,12 +284,7 @@ impl DirEntry { lstat(&self.path()) } - #[cfg(target_os = "solaris")] - pub fn file_type(&self) -> io::Result { - stat(&self.path()).map(|m| m.file_type()) - } - - #[cfg(target_os = "haiku")] + #[cfg(any(target_os = "solaris", target_os = "haiku"))] pub fn file_type(&self) -> io::Result { lstat(&self.path()).map(|m| m.file_type()) } From 45b90ef52e65377cf1a1483a0d9dd8637de81de3 Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Thu, 3 Aug 2017 15:38:34 -0700 Subject: [PATCH 181/213] Some tests use `res_init()` and need `-lresolv` on Solaris This is a follow-up to ea23e50f, which fixed it for the build. 
--- src/test/run-make/tools.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/run-make/tools.mk b/src/test/run-make/tools.mk index 693573d53a4a..d13ba11e96a3 100644 --- a/src/test/run-make/tools.mk +++ b/src/test/run-make/tools.mk @@ -82,7 +82,7 @@ ifeq ($(UNAME),Bitrig) EXTRACXXFLAGS := -lc++ -lc++abi else ifeq ($(UNAME),SunOS) - EXTRACFLAGS := -lm -lpthread -lposix4 -lsocket + EXTRACFLAGS := -lm -lpthread -lposix4 -lsocket -lresolv else ifeq ($(UNAME),OpenBSD) EXTRACFLAGS := -lm -lpthread From ced1fda5659d324c9f4f98968de2bc893c456aa4 Mon Sep 17 00:00:00 2001 From: Josh Stone Date: Thu, 3 Aug 2017 15:42:05 -0700 Subject: [PATCH 182/213] Exclude Windows from LLVM_LINK_LLVM_DYLIB --- src/bootstrap/native.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index 595f90be1dd6..ce0052a5fb6a 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -125,11 +125,19 @@ impl Step for Llvm { .define("WITH_POLLY", "OFF") .define("LLVM_ENABLE_TERMINFO", "OFF") .define("LLVM_ENABLE_LIBEDIT", "OFF") - .define("LLVM_LINK_LLVM_DYLIB", "ON") .define("LLVM_PARALLEL_COMPILE_JOBS", build.jobs().to_string()) .define("LLVM_TARGET_ARCH", target.split('-').next().unwrap()) .define("LLVM_DEFAULT_TARGET_TRIPLE", target); + + // This setting makes the LLVM tools link to the dynamic LLVM library, + // which saves both memory during parallel links and overall disk space + // for the tools. We don't distribute any of those tools, so this is + // just a local concern. However, this doesn't seem to work on Windows. 
+ if !target.contains("windows") { + cfg.define("LLVM_LINK_LLVM_DYLIB", "ON"); + } + if target.contains("msvc") { cfg.define("LLVM_USE_CRT_DEBUG", "MT"); cfg.define("LLVM_USE_CRT_RELEASE", "MT"); From 64e426e8e9fff27a7dc0a1bdf297bf5fd3f10b15 Mon Sep 17 00:00:00 2001 From: Ian Douglas Scott Date: Thu, 3 Aug 2017 15:54:53 -0700 Subject: [PATCH 183/213] Fix AsRawHandle --- src/libstd/sys/windows/ext/io.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libstd/sys/windows/ext/io.rs b/src/libstd/sys/windows/ext/io.rs index 829e44cebf77..90128dda088a 100644 --- a/src/libstd/sys/windows/ext/io.rs +++ b/src/libstd/sys/windows/ext/io.rs @@ -75,21 +75,21 @@ impl AsRawHandle for fs::File { #[stable(feature = "asraw_stdio", since = "1.21.0")] impl AsRawHandle for io::Stdin { fn as_raw_handle(&self) -> RawHandle { - unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) } as RawHandle + unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle } } } #[stable(feature = "asraw_stdio", since = "1.21.0")] impl AsRawHandle for io::Stdout { fn as_raw_handle(&self) -> RawHandle { - unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) } as RawHandle + unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle } } } #[stable(feature = "asraw_stdio", since = "1.21.0")] impl AsRawHandle for io::Stderr { fn as_raw_handle(&self) -> RawHandle { - unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) } as RawHandle + unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle } } } From 497c5a34da45527f6e8a2d5a325293da6724dafb Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Thu, 3 Aug 2017 17:18:19 -0700 Subject: [PATCH 184/213] Solaris linker options need to be accounted for in one test. This is a follow-up to f189d7a6937 and 9d11b089ad1. 
While `-z ignore` is what needs to be passed to the Solaris linker, because gcc is used as the default linker, both that form and `-Wl,-z -Wl,ignore` (including extra double quotes) need to be taken into account, which explains the more complex regular expression. --- src/test/run-make/codegen-options-parsing/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/run-make/codegen-options-parsing/Makefile b/src/test/run-make/codegen-options-parsing/Makefile index 2b8b0712cc7d..dc46a8a04ef8 100644 --- a/src/test/run-make/codegen-options-parsing/Makefile +++ b/src/test/run-make/codegen-options-parsing/Makefile @@ -25,7 +25,7 @@ all: # Should not link dead code... $(RUSTC) -Z print-link-args dummy.rs 2>&1 | \ - grep -e '--gc-sections' -e '-dead_strip' -e '/OPT:REF' + grep -e '--gc-sections' -e '-z[^ ]* [^ ]*\' -e '-dead_strip' -e '/OPT:REF' # ... unless you specifically ask to keep it $(RUSTC) -Z print-link-args -C link-dead-code dummy.rs 2>&1 | \ - (! grep -e '--gc-sections' -e '-dead_strip' -e '/OPT:REF') + (! grep -e '--gc-sections' -e '-z[^ ]* [^ ]*\' -e '-dead_strip' -e '/OPT:REF') From 9144755a915159f13fee9a2cbeee724c5138a2a7 Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Thu, 3 Aug 2017 17:19:19 -0700 Subject: [PATCH 185/213] Recognize SPARC in more tests where architecture matters. 
--- src/test/compile-fail/asm-bad-clobber.rs | 1 + src/test/compile-fail/asm-in-bad-modifier.rs | 1 + src/test/compile-fail/asm-misplaced-option.rs | 1 + src/test/compile-fail/asm-out-assign-imm.rs | 1 + src/test/compile-fail/asm-out-no-modifier.rs | 1 + src/test/compile-fail/asm-out-read-uninit.rs | 1 + src/test/run-pass/conditional-compile-arch.rs | 3 +++ src/test/run-pass/union/union-basic.rs | 1 + 8 files changed, 10 insertions(+) diff --git a/src/test/compile-fail/asm-bad-clobber.rs b/src/test/compile-fail/asm-bad-clobber.rs index 145662fd87c9..b863e90a3b71 100644 --- a/src/test/compile-fail/asm-bad-clobber.rs +++ b/src/test/compile-fail/asm-bad-clobber.rs @@ -14,6 +14,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm, rustc_attrs)] diff --git a/src/test/compile-fail/asm-in-bad-modifier.rs b/src/test/compile-fail/asm-in-bad-modifier.rs index f0467e75223b..cae41332795d 100644 --- a/src/test/compile-fail/asm-in-bad-modifier.rs +++ b/src/test/compile-fail/asm-in-bad-modifier.rs @@ -11,6 +11,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm)] diff --git a/src/test/compile-fail/asm-misplaced-option.rs b/src/test/compile-fail/asm-misplaced-option.rs index 37a267535317..e634238c6e17 100644 --- a/src/test/compile-fail/asm-misplaced-option.rs +++ b/src/test/compile-fail/asm-misplaced-option.rs @@ -14,6 +14,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm, rustc_attrs)] diff --git a/src/test/compile-fail/asm-out-assign-imm.rs b/src/test/compile-fail/asm-out-assign-imm.rs index f95e4410381d..546d402252e2 100644 --- a/src/test/compile-fail/asm-out-assign-imm.rs +++ b/src/test/compile-fail/asm-out-assign-imm.rs @@ -11,6 +11,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm)] diff --git a/src/test/compile-fail/asm-out-no-modifier.rs b/src/test/compile-fail/asm-out-no-modifier.rs index 
acf575c003a7..2e843ddac822 100644 --- a/src/test/compile-fail/asm-out-no-modifier.rs +++ b/src/test/compile-fail/asm-out-no-modifier.rs @@ -11,6 +11,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm)] diff --git a/src/test/compile-fail/asm-out-read-uninit.rs b/src/test/compile-fail/asm-out-read-uninit.rs index bd180f6e5ebd..c85a097b962c 100644 --- a/src/test/compile-fail/asm-out-read-uninit.rs +++ b/src/test/compile-fail/asm-out-read-uninit.rs @@ -11,6 +11,7 @@ // ignore-s390x // ignore-emscripten // ignore-powerpc +// ignore-sparc #![feature(asm)] diff --git a/src/test/run-pass/conditional-compile-arch.rs b/src/test/run-pass/conditional-compile-arch.rs index 6e3e4be0d8e8..3d8bf9333fd2 100644 --- a/src/test/run-pass/conditional-compile-arch.rs +++ b/src/test/run-pass/conditional-compile-arch.rs @@ -39,3 +39,6 @@ pub fn main() { } #[cfg(target_arch = "wasm32")] pub fn main() { } + +#[cfg(target_arch = "sparc64")] +pub fn main() { } diff --git a/src/test/run-pass/union/union-basic.rs b/src/test/run-pass/union/union-basic.rs index 5e5b2d4d7ce7..de744520cc6b 100644 --- a/src/test/run-pass/union/union-basic.rs +++ b/src/test/run-pass/union/union-basic.rs @@ -12,6 +12,7 @@ // FIXME: This test case makes little-endian assumptions. 
// ignore-s390x +// ignore-sparc extern crate union; use std::mem::{size_of, align_of, zeroed}; From 67044501bc851f36327e429c0b7ab935d375d71a Mon Sep 17 00:00:00 2001 From: Florian Zeitz Date: Fri, 4 Aug 2017 02:27:30 +0200 Subject: [PATCH 186/213] trans: Reuse immediate value in call to call_memset() --- src/librustc_trans/mir/rvalue.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 0485054a12ae..a23e1a0684bf 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -122,8 +122,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if common::val_ty(v) == Type::i8(bcx.ccx) { let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); let align = C_i32(bcx.ccx, align as i32); - let fill = tr_elem.immediate(); - base::call_memset(&bcx, base, fill, size, align, false); + base::call_memset(&bcx, base, v, size, align, false); return bcx; } } From c151220a84e40b65e45308cc0f3bbea4466d3acf Mon Sep 17 00:00:00 2001 From: Tobias Schaffner Date: Thu, 3 Aug 2017 10:37:11 +0200 Subject: [PATCH 187/213] Add L4Re Support in librustc_back Add support for x86_64-unknown-l4re-uclibc target, which covers the L4 Runtime Environment. --- src/librustc_back/target/l4re_base.rs | 32 +++++++++++++++++++ src/librustc_back/target/mod.rs | 3 ++ .../target/x86_64_unknown_l4re_uclibc.rs | 31 ++++++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 src/librustc_back/target/l4re_base.rs create mode 100644 src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs diff --git a/src/librustc_back/target/l4re_base.rs b/src/librustc_back/target/l4re_base.rs new file mode 100644 index 000000000000..998183d40150 --- /dev/null +++ b/src/librustc_back/target/l4re_base.rs @@ -0,0 +1,32 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use PanicStrategy; +use LinkerFlavor; +use target::{LinkArgs, TargetOptions}; +use std::default::Default; + +pub fn opts() -> TargetOptions { + let mut pre_link_args = LinkArgs::new(); + pre_link_args.insert(LinkerFlavor::Ld, vec![ + "-nostdlib".to_string(), + ]); + + TargetOptions { + executables: true, + has_elf_tls: false, + exe_allocation_crate: Some("alloc_system".to_string()), + panic_strategy: PanicStrategy::Abort, + linker: "ld".to_string(), + pre_link_args: pre_link_args, + target_family: Some("unix".to_string()), + .. Default::default() + } +} diff --git a/src/librustc_back/target/mod.rs b/src/librustc_back/target/mod.rs index 0dbfdb4d809e..08b94d5a01cb 100644 --- a/src/librustc_back/target/mod.rs +++ b/src/librustc_back/target/mod.rs @@ -69,6 +69,7 @@ mod solaris_base; mod windows_base; mod windows_msvc_base; mod thumb_base; +mod l4re_base; mod fuchsia_base; mod redox_base; @@ -193,6 +194,8 @@ supported_targets! { ("aarch64-unknown-fuchsia", aarch64_unknown_fuchsia), ("x86_64-unknown-fuchsia", x86_64_unknown_fuchsia), + ("x86_64-unknown-l4re-uclibc", x86_64_unknown_l4re_uclibc), + ("x86_64-unknown-redox", x86_64_unknown_redox), ("i386-apple-ios", i386_apple_ios), diff --git a/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs b/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs new file mode 100644 index 000000000000..b447f8a989db --- /dev/null +++ b/src/librustc_back/target/x86_64_unknown_l4re_uclibc.rs @@ -0,0 +1,31 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use LinkerFlavor; +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::l4re_base::opts(); + base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); + + Ok(Target { + llvm_target: "x86_64-unknown-l4re-uclibc".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), + arch: "x86_64".to_string(), + target_os: "l4re".to_string(), + target_env: "uclibc".to_string(), + target_vendor: "unknown".to_string(), + linker_flavor: LinkerFlavor::Ld, + options: base, + }) +} From 6c46f4f11cdd56fcd12c86d121259c738b7a8376 Mon Sep 17 00:00:00 2001 From: Josh Stone Date: Fri, 4 Aug 2017 00:13:11 -0700 Subject: [PATCH 188/213] Use LLVM_LINK_LLVM_DYLIB only on linux-gnu and apple-darwin --- src/bootstrap/native.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index ce0052a5fb6a..ee0eca5d4824 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -133,8 +133,8 @@ impl Step for Llvm { // This setting makes the LLVM tools link to the dynamic LLVM library, // which saves both memory during parallel links and overall disk space // for the tools. We don't distribute any of those tools, so this is - // just a local concern. However, this doesn't seem to work on Windows. - if !target.contains("windows") { + // just a local concern. However, it doesn't work well everywhere. 
+ if target.contains("linux-gnu") || target.contains("apple-darwin") { cfg.define("LLVM_LINK_LLVM_DYLIB", "ON"); } From e7e620d0cc4ca1a971d8381a65e64efd5b66e489 Mon Sep 17 00:00:00 2001 From: scalexm Date: Fri, 4 Aug 2017 12:04:34 +0200 Subject: [PATCH 189/213] Rename `ConfirmResult` fields --- src/librustc_typeck/check/method/confirm.rs | 10 +++++----- src/librustc_typeck/check/method/mod.rs | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/librustc_typeck/check/method/confirm.rs b/src/librustc_typeck/check/method/confirm.rs index fd148062372f..b6a5ce0a6ce5 100644 --- a/src/librustc_typeck/check/method/confirm.rs +++ b/src/librustc_typeck/check/method/confirm.rs @@ -40,7 +40,7 @@ impl<'a, 'gcx, 'tcx> Deref for ConfirmContext<'a, 'gcx, 'tcx> { pub struct ConfirmResult<'tcx> { pub callee: MethodCallee<'tcx>, - pub rerun: bool, + pub illegal_sized_bound: bool, } impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { @@ -99,12 +99,12 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // If there is a `Self: Sized` bound and `Self` is a trait object, it is possible that // something which derefs to `Self` actually implements the trait and the caller // wanted to make a static dispatch on it but forgot to import the trait. - // See test `src/test/compile-fail/issue-35976.rs`. + // See test `src/test/ui/issue-35976.rs`. // // In that case, we'll error anyway, but we'll also re-run the search with all traits // in scope, and if we find another method which can be used, we'll output an // appropriate hint suggesting to import the trait. - let rerun = self.predicates_require_illegal_sized_bound(&method_predicates); + let illegal_sized_bound = self.predicates_require_illegal_sized_bound(&method_predicates); // Unify the (adjusted) self type with what the method expects. 
self.unify_receivers(self_ty, method_sig.inputs()[0]); @@ -112,7 +112,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // Add any trait/regions obligations specified on the method's type parameters. // We won't add these if we encountered an illegal sized bound, so that we can use // a custom error in that case. - if !rerun { + if !illegal_sized_bound { let method_ty = self.tcx.mk_fn_ptr(ty::Binder(method_sig)); self.add_obligations(method_ty, all_substs, &method_predicates); } @@ -128,7 +128,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { self.convert_lvalue_derefs_to_mutable(); } - ConfirmResult { callee, rerun } + ConfirmResult { callee, illegal_sized_bound } } /////////////////////////////////////////////////////////////////////////// diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index eda17ab02c49..dd5b0cdda424 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -170,7 +170,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pick.clone(), segment); - if result.rerun { + if result.illegal_sized_bound { // We probe again, taking all traits into account (not only those in scope). let candidates = match self.lookup_probe(span, From a508a2e56b3c91be1e941eb6bd4aa7c30bd7dbfd Mon Sep 17 00:00:00 2001 From: Oliver Schneider Date: Fri, 4 Aug 2017 12:33:48 +0200 Subject: [PATCH 190/213] Uplift some comments to Doc comments --- src/librustc/ty/layout.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 009b0619bd75..e770f1d55dcf 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -581,14 +581,14 @@ pub struct Struct { pub min_size: Size, } -// Info required to optimize struct layout. +/// Info required to optimize struct layout. 
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)] enum StructKind { - // A tuple, closure, or univariant which cannot be coerced to unsized. + /// A tuple, closure, or univariant which cannot be coerced to unsized. AlwaysSizedUnivariant, - // A univariant, the last field of which may be coerced to unsized. + /// A univariant, the last field of which may be coerced to unsized. MaybeUnsizedUnivariant, - // A univariant, but part of an enum. + /// A univariant, but part of an enum. EnumVariant, } @@ -1020,7 +1020,7 @@ pub enum Layout { /// TyRawPtr or TyRef with a !Sized pointee. FatPointer { metadata: Primitive, - // If true, the pointer cannot be null. + /// If true, the pointer cannot be null. non_zero: bool }, @@ -1031,8 +1031,8 @@ pub enum Layout { discr: Integer, signed: bool, non_zero: bool, - // Inclusive discriminant range. - // If min > max, it represents min...u64::MAX followed by 0...max. + /// Inclusive discriminant range. + /// If min > max, it represents min...u64::MAX followed by 0...max. // FIXME(eddyb) always use the shortest range, e.g. by finding // the largest space between two consecutive discriminants and // taking everything else as the (shortest) discriminant range. @@ -1043,7 +1043,7 @@ pub enum Layout { /// Single-case enums, and structs/tuples. Univariant { variant: Struct, - // If true, the structure is NonZero. + /// If true, the structure is NonZero. // FIXME(eddyb) use a newtype Layout kind for this. non_zero: bool }, @@ -1084,9 +1084,9 @@ pub enum Layout { StructWrappedNullablePointer { nndiscr: u64, nonnull: Struct, - // N.B. There is a 0 at the start, for LLVM GEP through a pointer. + /// N.B. There is a 0 at the start, for LLVM GEP through a pointer. discrfield: FieldPath, - // Like discrfield, but in source order. For debuginfo. + /// Like discrfield, but in source order. For debuginfo. discrfield_source: FieldPath } } @@ -1944,11 +1944,11 @@ pub enum SizeSkeleton<'tcx> { /// A potentially-fat pointer. 
Pointer { - // If true, this pointer is never null. + /// If true, this pointer is never null. non_zero: bool, - // The type which determines the unsized metadata, if any, - // of this pointer. Either a type parameter or a projection - // depending on one, with regions erased. + /// The type which determines the unsized metadata, if any, + /// of this pointer. Either a type parameter or a projection + /// depending on one, with regions erased. tail: Ty<'tcx> } } From 28c423d6928539ffa28b44d569e3ece6ea957083 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1ty=C3=A1s=20Mustoha?= Date: Fri, 4 Aug 2017 13:45:08 +0200 Subject: [PATCH 191/213] Unskip some tests on AArch64 --- src/test/run-pass/foreign-call-no-runtime.rs | 1 - src/test/run-pass/issue-13304.rs | 1 - src/test/run-pass/issue-16272.rs | 1 - src/test/run-pass/issue-20091.rs | 1 - src/test/run-pass/process-spawn-with-unicode-params.rs | 1 - src/test/run-pass/sigpipe-should-be-ignored.rs | 1 - 6 files changed, 6 deletions(-) diff --git a/src/test/run-pass/foreign-call-no-runtime.rs b/src/test/run-pass/foreign-call-no-runtime.rs index 697e9074c448..dd5c075c39b1 100644 --- a/src/test/run-pass/foreign-call-no-runtime.rs +++ b/src/test/run-pass/foreign-call-no-runtime.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// ignore-aarch64 // ignore-emscripten no threads support #![feature(libc)] diff --git a/src/test/run-pass/issue-13304.rs b/src/test/run-pass/issue-13304.rs index e1c2c5684fb5..5a743d7b5478 100644 --- a/src/test/run-pass/issue-13304.rs +++ b/src/test/run-pass/issue-13304.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-// ignore-aarch64 // ignore-emscripten #![feature(io, process_capture)] diff --git a/src/test/run-pass/issue-16272.rs b/src/test/run-pass/issue-16272.rs index d4f3d15b320d..f86be2d7c993 100644 --- a/src/test/run-pass/issue-16272.rs +++ b/src/test/run-pass/issue-16272.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// ignore-aarch64 // ignore-emscripten use std::process::Command; diff --git a/src/test/run-pass/issue-20091.rs b/src/test/run-pass/issue-20091.rs index 52c7911075ae..1ee47a69d0c8 100644 --- a/src/test/run-pass/issue-20091.rs +++ b/src/test/run-pass/issue-20091.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// ignore-aarch64 // ignore-emscripten #![feature(std_misc, os)] diff --git a/src/test/run-pass/process-spawn-with-unicode-params.rs b/src/test/run-pass/process-spawn-with-unicode-params.rs index d3d847127ee9..550c6d6ab670 100644 --- a/src/test/run-pass/process-spawn-with-unicode-params.rs +++ b/src/test/run-pass/process-spawn-with-unicode-params.rs @@ -16,7 +16,6 @@ // non-ASCII characters. The child process ensures all the strings are // intact. -// ignore-aarch64 // ignore-emscripten use std::io::prelude::*; diff --git a/src/test/run-pass/sigpipe-should-be-ignored.rs b/src/test/run-pass/sigpipe-should-be-ignored.rs index 4eb4720e8d7b..5aa4faa13656 100644 --- a/src/test/run-pass/sigpipe-should-be-ignored.rs +++ b/src/test/run-pass/sigpipe-should-be-ignored.rs @@ -11,7 +11,6 @@ // Be sure that when a SIGPIPE would have been received that the entire process // doesn't die in a ball of fire, but rather it's gracefully handled. -// ignore-aarch64 // ignore-emscripten use std::env; From ea6a6571758229382d3fcb3fcc9e273c9b854345 Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Fri, 4 Aug 2017 08:21:28 -0400 Subject: [PATCH 192/213] Indicate why str::{get,get_mut} examples return None. 
--- src/liballoc/str.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index 4df13c509a83..bbdc36b8737c 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -328,11 +328,16 @@ impl str { /// # Examples /// /// ``` - /// let v = "🗻∈🌏"; - /// assert_eq!(Some("🗻"), v.get(0..4)); - /// assert!(v.get(1..).is_none()); - /// assert!(v.get(..8).is_none()); - /// assert!(v.get(..42).is_none()); + /// let mut v = String::from("🗻∈🌏"); + /// + /// assert_eq!(Some("🗻"), v.get(0..4); + /// + /// // indices not on UTF-8 sequence boundaries + /// assert!(v.get_mut(1..).is_none()); + /// assert!(v.get_mut(..8).is_none()); + /// + /// // out of bounds + /// assert!(v.get_mut(..42).is_none()); /// ``` #[stable(feature = "str_checked_slicing", since = "1.20.0")] #[inline] @@ -351,9 +356,14 @@ impl str { /// /// ``` /// let mut v = String::from("🗻∈🌏"); + /// /// assert_eq!(Some("🗻"), v.get_mut(0..4).map(|v| &*v)); + /// + /// // indices not on UTF-8 sequence boundaries /// assert!(v.get_mut(1..).is_none()); /// assert!(v.get_mut(..8).is_none()); + /// + /// // out of bounds /// assert!(v.get_mut(..42).is_none()); /// ``` #[stable(feature = "str_checked_slicing", since = "1.20.0")] From 11d6312abd614fca3970902f137225e0437d0a09 Mon Sep 17 00:00:00 2001 From: Florian Zeitz Date: Fri, 4 Aug 2017 16:58:12 +0200 Subject: [PATCH 193/213] codegen tests: Check type of `len` argument to `llvm.memset.*` based on the exact intrinsic used --- src/test/codegen/slice-init.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/test/codegen/slice-init.rs b/src/test/codegen/slice-init.rs index cb684af39536..569d937c812c 100644 --- a/src/test/codegen/slice-init.rs +++ b/src/test/codegen/slice-init.rs @@ -33,7 +33,7 @@ pub fn zero_len_array() { // CHECK-LABEL: @byte_array #[no_mangle] pub fn byte_array() { - // CHECK: call void
@llvm.memset.p0i8.i{{[0-9]+}}(i8* {{.*}}, i8 7, i64 4 + // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 7, i[[WIDTH]] 4 // CHECK-NOT: br label %slice_loop_header{{.*}} let x = [7u8; 4]; drop(&x); @@ -49,7 +49,7 @@ enum Init { // CHECK-LABEL: @byte_enum_array #[no_mangle] pub fn byte_enum_array() { - // CHECK: call void @llvm.memset.p0i8.i{{[0-9]+}}(i8* {{.*}}, i8 {{.*}}, i64 4 + // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 {{.*}}, i[[WIDTH]] 4 // CHECK-NOT: br label %slice_loop_header{{.*}} let x = [Init::Memset; 4]; drop(&x); @@ -58,7 +58,7 @@ pub fn byte_enum_array() { // CHECK-LABEL: @zeroed_integer_array #[no_mangle] pub fn zeroed_integer_array() { - // CHECK: call void @llvm.memset.p0i8.i{{[0-9]+}}(i8* {{.*}}, i8 0, i64 16 + // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 0, i[[WIDTH]] 16 // CHECK-NOT: br label %slice_loop_header{{.*}} let x = [0u32; 4]; drop(&x); From 068710f28a203dd5edd50aef3b98963073bc3cd9 Mon Sep 17 00:00:00 2001 From: bjorn3 Date: Sat, 5 Aug 2017 10:42:53 +0200 Subject: [PATCH 194/213] Make some comments doc comments in librustc/middle/cstore.rs --- src/librustc/middle/cstore.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 48bddf2f7175..e0bb7a9b844a 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -18,9 +18,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// the rustc crate store interface. This also includes types that -// are *mostly* used as a part of that interface, but these should -// probably get a better home if someone can find one. +//! the rustc crate store interface. This also includes types that +//! are *mostly* used as a part of that interface, but these should +//! probably get a better home if someone can find one. 
use hir::def; use hir::def_id::{CrateNum, DefId, DefIndex}; @@ -55,8 +55,8 @@ pub struct LinkMeta { pub crate_hash: Svh, } -// Where a crate came from on the local filesystem. One of these three options -// must be non-None. +/// Where a crate came from on the local filesystem. One of these three options +/// must be non-None. #[derive(PartialEq, Clone, Debug)] pub struct CrateSource { pub dylib: Option<(PathBuf, PathKind)>, @@ -120,10 +120,14 @@ pub enum LinkagePreference { #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub enum NativeLibraryKind { - NativeStatic, // native static library (.a archive) - NativeStaticNobundle, // native static library, which doesn't get bundled into .rlibs - NativeFramework, // macOS-specific - NativeUnknown, // default way to specify a dynamic library + /// native static library (.a archive) + NativeStatic, + /// native static library, which doesn't get bundled into .rlibs + NativeStaticNobundle, + /// macOS-specific + NativeFramework, + /// default way to specify a dynamic library + NativeUnknown, } #[derive(Clone, Hash, RustcEncodable, RustcDecodable)] From de4f1a170f96f7e99f28dda534a7b76010499587 Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Fri, 4 Aug 2017 18:01:34 -0400 Subject: [PATCH 195/213] Update str::split_at_mut example to demonstrate mutability. 
--- src/liballoc/str.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index bbdc36b8737c..bf375ca0c43a 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -330,7 +330,7 @@ impl str { /// ``` /// let mut v = String::from("🗻∈🌏"); /// - /// assert_eq!(Some("🗻"), v.get(0..4); /// + /// assert_eq!(Some("🗻"), v.get(0..4)); /// /// // indices not on UTF-8 sequence boundaries /// assert!(v.get_mut(1..).is_none()); @@ -573,12 +573,16 @@ impl str { /// Basic usage: /// /// ``` + /// use std::ascii::AsciiExt; + /// /// let mut s = "Per Martin-Löf".to_string(); - /// - /// let (first, last) = s.split_at_mut(3); - /// - /// assert_eq!("Per", first); - /// assert_eq!(" Martin-Löf", last); + /// { + /// let (first, last) = s.split_at_mut(3); + /// first.make_ascii_uppercase(); + /// assert_eq!("PER", first); + /// assert_eq!(" Martin-Löf", last); + /// } + /// assert_eq!("PER Martin-Löf", s); /// ``` #[inline] #[stable(feature = "str_split_at", since = "1.4.0")] From 6722185abdab8c5b83109a375e3fe14bd6aa8dc4 Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Fri, 4 Aug 2017 23:08:29 -0400 Subject: [PATCH 196/213] Indicate how to turn byte slices back into a string slice. --- src/liballoc/str.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index bf375ca0c43a..80317cd763b5 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -273,7 +273,10 @@ impl str { core_str::StrExt::is_char_boundary(self, index) } - /// Converts a string slice to a byte slice. + /// Converts a string slice to a byte slice. To convert the byte slice back + /// into a string slice, use the [`str::from_utf8`] function. + /// + /// [`str::from_utf8`]: ./str/fn.from_utf8.html /// /// # Examples /// @@ -289,7 +292,11 @@ impl str { core_str::StrExt::as_bytes(self) } - /// Converts a mutable string slice to a mutable byte slice.
+ /// Converts a mutable string slice to a mutable byte slice. To convert the + /// mutable byte slice back into a mutable string slice, use the + /// [`str::from_utf8_mut`] function. + /// + /// [`str::from_utf8_mut`]: ./str/fn.from_utf8_mut.html #[stable(feature = "str_mut_extras", since = "1.20.0")] #[inline(always)] pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] { From 3bf1ba7987829fab555a767c77883d123ded729b Mon Sep 17 00:00:00 2001 From: Eric Daniels Date: Sat, 5 Aug 2017 13:15:53 -0400 Subject: [PATCH 197/213] Fix typo in coerce_forced_unit docstring --- src/librustc_typeck/check/coercion.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_typeck/check/coercion.rs b/src/librustc_typeck/check/coercion.rs index 968e893b9a00..e494bc152222 100644 --- a/src/librustc_typeck/check/coercion.rs +++ b/src/librustc_typeck/check/coercion.rs @@ -1046,7 +1046,7 @@ impl<'gcx, 'tcx, 'exprs, E> CoerceMany<'gcx, 'tcx, 'exprs, E> } /// Indicates that one of the inputs is a "forced unit". This - /// occurs in a case like `if foo { ... };`, where the issing else + /// occurs in a case like `if foo { ... };`, where the missing else /// generates a "forced unit". Another example is a `loop { break; /// }`, where the `break` has no argument expression. 
We treat /// these cases slightly differently for error-reporting From 46fe8e99665d6a210f6dc16590127ee808e60366 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sat, 22 Jul 2017 01:17:53 +0200 Subject: [PATCH 198/213] Don't warn on unused field on union --- src/librustc/middle/dead.rs | 18 +++++++++++++++--- src/test/ui/union-fields.rs | 29 +++++++++++++++++++++++++++++ src/test/ui/union-fields.stderr | 14 ++++++++++++++ 3 files changed, 58 insertions(+), 3 deletions(-) create mode 100644 src/test/ui/union-fields.rs create mode 100644 src/test/ui/union-fields.stderr diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 2238e464cbcd..91d25c680d87 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -13,7 +13,7 @@ // from live codes are live, and everything else is dead. use hir::map as hir_map; -use hir::{self, PatKind}; +use hir::{self, Item_, PatKind}; use hir::intravisit::{self, Visitor, NestedVisitorMap}; use hir::itemlikevisit::ItemLikeVisitor; @@ -558,8 +558,20 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { fn visit_struct_field(&mut self, field: &'tcx hir::StructField) { if self.should_warn_about_field(&field) { - self.warn_dead_code(field.id, field.span, - field.name, "field"); + let did = self.tcx.hir.get_parent_did(field.id); + if if let Some(node_id) = self.tcx.hir.as_local_node_id(did) { + match self.tcx.hir.find(node_id) { + Some(hir_map::NodeItem(item)) => match item.node { + Item_::ItemUnion(_, _) => false, + _ => true, + }, + _ => true, + } + } else { + true + } { + self.warn_dead_code(field.id, field.span, field.name, "field"); + } } intravisit::walk_struct_field(self, field); diff --git a/src/test/ui/union-fields.rs b/src/test/ui/union-fields.rs new file mode 100644 index 000000000000..87a617301336 --- /dev/null +++ b/src/test/ui/union-fields.rs @@ -0,0 +1,29 @@ +// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![deny(dead_code)] + +union U { + x: u32, + y: f32, +} + +struct V { + x: u32, + y: u32, +} + +fn main() { + let u = U { x: 0x3f800000 }; + let _f = unsafe { u.y }; + let v = V { x: 0, y: 0 }; + println!("{}", v.x); +} + diff --git a/src/test/ui/union-fields.stderr b/src/test/ui/union-fields.stderr new file mode 100644 index 000000000000..d0f1a9214255 --- /dev/null +++ b/src/test/ui/union-fields.stderr @@ -0,0 +1,14 @@ +error: field is never used: `y` + --> $DIR/union-fields.rs:20:5 + | +20 | y: u32, + | ^^^^^^ + | +note: lint level defined here + --> $DIR/union-fields.rs:11:9 + | +11 | #![deny(dead_code)] + | ^^^^^^^^^ + +error: aborting due to previous error + From 59fcac6fa977630640fb79c4c97486b14ca66cee Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sun, 30 Jul 2017 19:08:26 +0200 Subject: [PATCH 199/213] Improve dead code detection for unions --- src/librustc/hir/intravisit.rs | 2 +- src/librustc/middle/dead.rs | 46 +++++++++++++++++++++++----------- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs index 57198d8ca0b7..e751241669da 100644 --- a/src/librustc/hir/intravisit.rs +++ b/src/librustc/hir/intravisit.rs @@ -877,7 +877,7 @@ pub fn walk_impl_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, impl_item_ref: &' pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: &'v VariantData) { visitor.visit_id(struct_definition.id()); - walk_list!(visitor, visit_struct_field, struct_definition.fields()); + walk_list!(visitor, visit_struct_field, struct_definition.fields().iter().rev()); } pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, 
struct_field: &'v StructField) { diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 91d25c680d87..2f0ee8d8f2fb 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -422,6 +422,7 @@ fn get_struct_ctor_id(item: &hir::Item) -> Option { struct DeadVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, live_symbols: Box>, + need_check_next_union_field: bool, } impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { @@ -537,6 +538,16 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { } } + fn visit_variant_data(&mut self, + s: &'tcx hir::VariantData, + _: ast::Name, + _: &'tcx hir::Generics, + _parent_id: ast::NodeId, + _: syntax_pos::Span) { + self.need_check_next_union_field = true; + intravisit::walk_struct_def(self, s) + } + fn visit_variant(&mut self, variant: &'tcx hir::Variant, g: &'tcx hir::Generics, @@ -557,23 +568,24 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { } fn visit_struct_field(&mut self, field: &'tcx hir::StructField) { - if self.should_warn_about_field(&field) { - let did = self.tcx.hir.get_parent_did(field.id); - if if let Some(node_id) = self.tcx.hir.as_local_node_id(did) { - match self.tcx.hir.find(node_id) { - Some(hir_map::NodeItem(item)) => match item.node { - Item_::ItemUnion(_, _) => false, - _ => true, - }, - _ => true, - } - } else { - true - } { + if self.need_check_next_union_field { + if self.should_warn_about_field(&field) { self.warn_dead_code(field.id, field.span, field.name, "field"); + } else { + let did = self.tcx.hir.get_parent_did(field.id); + if let Some(node_id) = self.tcx.hir.as_local_node_id(did) { + match self.tcx.hir.find(node_id) { + Some(hir_map::NodeItem(item)) => match item.node { + // If this is an union's field, it means all previous fields + // have been used as well so no need to check further. 
+ Item_::ItemUnion(_, _) => self.need_check_next_union_field = false, + _ => {} + }, + _ => {} + } + } } } - intravisit::walk_struct_field(self, field); } @@ -615,6 +627,10 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE); let krate = tcx.hir.krate(); let live_symbols = find_live(tcx, access_levels, krate); - let mut visitor = DeadVisitor { tcx: tcx, live_symbols: live_symbols }; + let mut visitor = DeadVisitor { + tcx: tcx, + live_symbols: live_symbols, + need_check_next_union_field: true, + }; intravisit::walk_crate(&mut visitor, krate); } From abc76ae7e98b0af80fb0426e68e0d271ac088da6 Mon Sep 17 00:00:00 2001 From: Corey Farwell Date: Sat, 5 Aug 2017 11:04:45 -0400 Subject: [PATCH 200/213] Bump 'src/doc/book' git submodule. Primarily to pick up this change: https://github.com/rust-lang/book/pull/866 ...to move this PR forward: https://github.com/rust-lang/rust/pull/43641 --- src/doc/book | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/doc/book b/src/doc/book index 4ee596df22f8..6f1a03dae6bc 160000 --- a/src/doc/book +++ b/src/doc/book @@ -1 +1 @@ -Subproject commit 4ee596df22f8ecaa9a0b2ddc0624b0104540dbf7 +Subproject commit 6f1a03dae6bcea44976918186f2d554186b3499c From bbdff02f8c6bac42c0488afbcfa9cdf1fbd3d282 Mon Sep 17 00:00:00 2001 From: Ryan Leckey Date: Sun, 6 Aug 2017 03:16:42 -0700 Subject: [PATCH 201/213] Preface 'cares' with 'only' --- src/libcore/iter/iterator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/iterator.rs index 1685dba3c5a6..2472efa14b30 100644 --- a/src/libcore/iter/iterator.rs +++ b/src/libcore/iter/iterator.rs @@ -1247,7 +1247,7 @@ pub trait Iterator { /// assert_eq!(vec![2, 4, 6], doubled); /// ``` /// - /// Because `collect()` cares about what you're collecting into, you can + /// Because `collect()` only cares about what you're collecting into, you can /// still use a 
partial type hint, `_`, with the turbofish: /// /// ``` From 00b362e332219e14c6516df622b0749be77b2ff9 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sun, 6 Aug 2017 13:34:24 +0200 Subject: [PATCH 202/213] Fix invalid background highlights and add missing colors --- src/librustdoc/html/static/styles/main.css | 43 ++++++++++++---------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/src/librustdoc/html/static/styles/main.css b/src/librustdoc/html/static/styles/main.css index 16962811ea02..034c5307fc08 100644 --- a/src/librustdoc/html/static/styles/main.css +++ b/src/librustdoc/html/static/styles/main.css @@ -64,22 +64,6 @@ pre { background-color: #f6fdb0 !important; } -:target { background: #FDFFD3; } -.content .highlighted { - color: #000 !important; - background-color: #ccc; -} -.content .highlighted a, .content .highlighted span { color: #000 !important; } -.content .highlighted.trait { background-color: #fece7e; } -.content .highlighted.mod { background-color: #afc6e4; } -.content .highlighted.enum { background-color: #b4d1b9; } -.content .highlighted.struct { background-color: #e7b1a0; } -.content .highlighted.union { background-color: #b7bd49; } -.content .highlighted.fn, -.content .highlighted.method, -.content .highlighted.tymethod { background-color: #c6afb3; } -.content .highlighted.type { background-color: #c6afb3; } - .docblock h1, .docblock h2, .docblock h3, .docblock h4, .docblock h5 { border-bottom-color: #DDD; } @@ -98,15 +82,38 @@ pre { border-bottom-color: #ddd; } +:target { background: #FDFFD3; } +.content .highlighted { + color: #000 !important; + background-color: #ccc; +} +.content .highlighted a, .content .highlighted span { color: #000 !important; } +.content .highlighted.trait { background-color: #c7b6ff; } +.content .highlighted.mod, +.content .highlighted.externcrate { background-color: #afc6e4; } +.content .highlighted.enum { background-color: #b4d1b9; } +.content .highlighted.struct { background-color: #e7b1a0; } 
+.content .highlighted.union { background-color: #b7bd49; } +.content .highlighted.fn, +.content .highlighted.method, +.content .highlighted.tymethod { background-color: #c6afb3; } +.content .highlighted.type { background-color: #ffc891; } +.content .highlighted.macro { background-color: #8ce488; } +.content .highlighted.constant, +.content .highlighted.static { background-color: #c3e0ff; } +.content .highlighted.primitive { background-color: #9aecff; } + .content span.enum, .content a.enum, .block a.current.enum { color: #508157; } .content span.struct, .content a.struct, .block a.current.struct { color: #df3600; } .content span.type, .content a.type, .block a.current.type { color: #ba5d00; } .content span.macro, .content a.macro, .block a.current.macro { color: #068000; } .content span.union, .content a.union, .block a.current.union { color: #767b27; } -.content span.constant, .content a.constant, .block a.current.constant { color: #546e8a; } +.content span.constant, .content a.constant, .block a.current.constant, +.content span.static, .content a.static, .block a.current.static { color: #546e8a; } .content span.primitive, .content a.primitive, .block a.current.primitive { color: #2c8093; } .content span.externcrate, .content span.mod, .content a.mod, .block a.current.mod { color: #4d76ae; } +.content span.trait, .content a.trait, .block a.current.trait { color: #7c5af3; } .content span.fn, .content a.fn, .block a.current.fn, .content span.method, .content a.method, .block a.current.method, .content span.tymethod, .content a.tymethod, .block a.current.tymethod, @@ -137,8 +144,6 @@ a.test-arrow { color: #f5f5f5; } -.content span.trait, .content a.trait, .block a.current.trait { color: #7c5af3; } - .search-input { color: #555; box-shadow: 0 0 0 1px #e0e0e0, 0 0 0 2px transparent; From 90f54d00d313178a6246566a5f09cd6af1f7a099 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sun, 6 Aug 2017 17:19:15 +0200 Subject: [PATCH 203/213] Improve union unused field 
detection --- src/librustc/hir/intravisit.rs | 2 +- src/librustc/middle/dead.rs | 55 ++++++++++++++++------------------ src/test/ui/union-fields.rs | 28 +++++++++-------- 3 files changed, 43 insertions(+), 42 deletions(-) diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs index e751241669da..57198d8ca0b7 100644 --- a/src/librustc/hir/intravisit.rs +++ b/src/librustc/hir/intravisit.rs @@ -877,7 +877,7 @@ pub fn walk_impl_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, impl_item_ref: &' pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: &'v VariantData) { visitor.visit_id(struct_definition.id()); - walk_list!(visitor, visit_struct_field, struct_definition.fields().iter().rev()); + walk_list!(visitor, visit_struct_field, struct_definition.fields()); } pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField) { diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 2f0ee8d8f2fb..c82cfb344967 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -189,6 +189,24 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { self.struct_has_extern_repr = had_extern_repr; self.inherited_pub_visibility = had_inherited_pub_visibility; } + + fn mark_as_used_if_union(&mut self, did: DefId) { + if let Some(node_id) = self.tcx.hir.as_local_node_id(did) { + match self.tcx.hir.find(node_id) { + Some(hir_map::NodeItem(item)) => match item.node { + Item_::ItemUnion(ref variant, _) => { + if variant.fields().len() > 1 { + for field in variant.fields() { + self.live_symbols.insert(field.id); + } + } + } + _ => {} + }, + _ => {} + } + } + } } impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { @@ -221,6 +239,11 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { hir::ExprPath(ref qpath @ hir::QPath::TypeRelative(..)) => { let def = self.tables.qpath_def(qpath, expr.id); self.handle_definition(def); + self.mark_as_used_if_union(def.def_id()); + } 
+ hir::ExprPath(ref qpath @ hir::QPath::Resolved(..)) => { + let def = self.tables.qpath_def(qpath, expr.id); + self.mark_as_used_if_union(def.def_id()); } hir::ExprMethodCall(..) => { self.lookup_and_handle_method(expr.id); @@ -422,7 +445,6 @@ fn get_struct_ctor_id(item: &hir::Item) -> Option { struct DeadVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, live_symbols: Box>, - need_check_next_union_field: bool, } impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { @@ -538,16 +560,6 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { } } - fn visit_variant_data(&mut self, - s: &'tcx hir::VariantData, - _: ast::Name, - _: &'tcx hir::Generics, - _parent_id: ast::NodeId, - _: syntax_pos::Span) { - self.need_check_next_union_field = true; - intravisit::walk_struct_def(self, s) - } - fn visit_variant(&mut self, variant: &'tcx hir::Variant, g: &'tcx hir::Generics, @@ -568,23 +580,9 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { } fn visit_struct_field(&mut self, field: &'tcx hir::StructField) { - if self.need_check_next_union_field { - if self.should_warn_about_field(&field) { - self.warn_dead_code(field.id, field.span, field.name, "field"); - } else { - let did = self.tcx.hir.get_parent_did(field.id); - if let Some(node_id) = self.tcx.hir.as_local_node_id(did) { - match self.tcx.hir.find(node_id) { - Some(hir_map::NodeItem(item)) => match item.node { - // If this is an union's field, it means all previous fields - // have been used as well so no need to check further. 
- Item_::ItemUnion(_, _) => self.need_check_next_union_field = false, - _ => {} - }, - _ => {} - } - } - } + if self.should_warn_about_field(&field) { + self.warn_dead_code(field.id, field.span, + field.name, "field"); } intravisit::walk_struct_field(self, field); } @@ -630,7 +628,6 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let mut visitor = DeadVisitor { tcx: tcx, live_symbols: live_symbols, - need_check_next_union_field: true, }; intravisit::walk_crate(&mut visitor, krate); } diff --git a/src/test/ui/union-fields.rs b/src/test/ui/union-fields.rs index 87a617301336..7b39a548fe9d 100644 --- a/src/test/ui/union-fields.rs +++ b/src/test/ui/union-fields.rs @@ -10,20 +10,24 @@ #![deny(dead_code)] -union U { - x: u32, - y: f32, +union U1 { + a: u8, // should not be reported + b: u8, // should not be reported + c: u8, // should be reported } - -struct V { - x: u32, - y: u32, +union U2 { + a: u8, // should be reported + b: u8, // should not be reported + c: u8, // should not be reported } +union NoDropLike { a: u8 } // should be reported as unused fn main() { - let u = U { x: 0x3f800000 }; - let _f = unsafe { u.y }; - let v = V { x: 0, y: 0 }; - println!("{}", v.x); -} + let u = U1 { a: 0 }; + let _a = unsafe { u.b }; + let u = U2 { c: 0 }; + let _b = unsafe { u.b }; + + let _u = NoDropLike { a: 10 }; +} From 09420fc2060e08e332efd00098cda6447285290d Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sun, 6 Aug 2017 18:49:33 +0200 Subject: [PATCH 204/213] Fix union unused fields check --- src/librustc/middle/dead.rs | 25 +++++++++++-------------- src/test/ui/union-fields.stderr | 22 +++++++++++++++++----- 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index c82cfb344967..6532cde9715b 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -190,20 +190,18 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { self.inherited_pub_visibility = 
had_inherited_pub_visibility; } - fn mark_as_used_if_union(&mut self, did: DefId) { + fn mark_as_used_if_union(&mut self, did: DefId, fields: &hir::HirVec) { if let Some(node_id) = self.tcx.hir.as_local_node_id(did) { - match self.tcx.hir.find(node_id) { - Some(hir_map::NodeItem(item)) => match item.node { - Item_::ItemUnion(ref variant, _) => { - if variant.fields().len() > 1 { - for field in variant.fields() { + if let Some(hir_map::NodeItem(item)) = self.tcx.hir.find(node_id) { + if let Item_::ItemUnion(ref variant, _) = item.node { + if variant.fields().len() > 1 { + for field in variant.fields() { + if fields.iter().find(|x| x.name.node == field.name).is_some() { self.live_symbols.insert(field.id); } } } - _ => {} - }, - _ => {} + } } } } @@ -239,11 +237,6 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { hir::ExprPath(ref qpath @ hir::QPath::TypeRelative(..)) => { let def = self.tables.qpath_def(qpath, expr.id); self.handle_definition(def); - self.mark_as_used_if_union(def.def_id()); - } - hir::ExprPath(ref qpath @ hir::QPath::Resolved(..)) => { - let def = self.tables.qpath_def(qpath, expr.id); - self.mark_as_used_if_union(def.def_id()); } hir::ExprMethodCall(..) 
=> { self.lookup_and_handle_method(expr.id); @@ -254,6 +247,10 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { hir::ExprTupField(ref lhs, idx) => { self.handle_tup_field_access(&lhs, idx.node); } + hir::ExprStruct(ref qpath, ref fields, _) => { + let def = self.tables.qpath_def(qpath, expr.id); + self.mark_as_used_if_union(def.def_id(), fields); + } _ => () } diff --git a/src/test/ui/union-fields.stderr b/src/test/ui/union-fields.stderr index d0f1a9214255..5c47ba388a45 100644 --- a/src/test/ui/union-fields.stderr +++ b/src/test/ui/union-fields.stderr @@ -1,8 +1,8 @@ -error: field is never used: `y` - --> $DIR/union-fields.rs:20:5 +error: field is never used: `c` + --> $DIR/union-fields.rs:16:5 | -20 | y: u32, - | ^^^^^^ +16 | c: u8, // should be reported + | ^^^^^ | note: lint level defined here --> $DIR/union-fields.rs:11:9 @@ -10,5 +10,17 @@ note: lint level defined here 11 | #![deny(dead_code)] | ^^^^^^^^^ -error: aborting due to previous error +error: field is never used: `a` + --> $DIR/union-fields.rs:19:5 + | +19 | a: u8, // should be reported + | ^^^^^ + +error: field is never used: `a` + --> $DIR/union-fields.rs:23:20 + | +23 | union NoDropLike { a: u8 } // should be reported as unused + | ^^^^^ + +error: aborting due to 3 previous errors From f94157eb616d18655809ea60af870e1888476c9a Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sun, 6 Aug 2017 20:46:32 +0200 Subject: [PATCH 205/213] Handle type aliases as well --- src/librustc/middle/dead.rs | 9 ++++++--- src/test/ui/union-fields.rs | 9 +++++++++ src/test/ui/union-fields.stderr | 8 +++++++- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 6532cde9715b..a525b4e13b78 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -247,9 +247,12 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { hir::ExprTupField(ref lhs, idx) => { self.handle_tup_field_access(&lhs, idx.node); } 
- hir::ExprStruct(ref qpath, ref fields, _) => { - let def = self.tables.qpath_def(qpath, expr.id); - self.mark_as_used_if_union(def.def_id(), fields); + hir::ExprStruct(_, ref fields, _) => { + if let ty::TypeVariants::TyAdt(ref def, _) = self.tables.expr_ty(expr).sty { + if def.is_union() { + self.mark_as_used_if_union(def.did, fields); + } + } } _ => () } diff --git a/src/test/ui/union-fields.rs b/src/test/ui/union-fields.rs index 7b39a548fe9d..021f57e3eee0 100644 --- a/src/test/ui/union-fields.rs +++ b/src/test/ui/union-fields.rs @@ -22,6 +22,13 @@ union U2 { } union NoDropLike { a: u8 } // should be reported as unused +union U { + a: u8, // should not be reported + b: u8, // should not be reported + c: u8, // should be reported +} +type A = U; + fn main() { let u = U1 { a: 0 }; let _a = unsafe { u.b }; @@ -30,4 +37,6 @@ fn main() { let _b = unsafe { u.b }; let _u = NoDropLike { a: 10 }; + let u = A { a: 0 }; + let _b = unsafe { u.b }; } diff --git a/src/test/ui/union-fields.stderr b/src/test/ui/union-fields.stderr index 5c47ba388a45..f3a2702d5aef 100644 --- a/src/test/ui/union-fields.stderr +++ b/src/test/ui/union-fields.stderr @@ -22,5 +22,11 @@ error: field is never used: `a` 23 | union NoDropLike { a: u8 } // should be reported as unused | ^^^^^ -error: aborting due to 3 previous errors +error: field is never used: `c` + --> $DIR/union-fields.rs:28:5 + | +28 | c: u8, // should be reported + | ^^^^^ + +error: aborting due to 4 previous errors From 08188c3c9267740aaff42a1d33acad394d807dd1 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Sun, 6 Aug 2017 18:20:08 +0200 Subject: [PATCH 206/213] Add missing error code for private method --- src/librustc_typeck/check/method/suggest.rs | 4 +- src/librustc_typeck/diagnostics.rs | 56 +++++++++++++++++++++ src/test/compile-fail/E0624.rs | 22 ++++++++ 3 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 src/test/compile-fail/E0624.rs diff --git a/src/librustc_typeck/check/method/suggest.rs 
b/src/librustc_typeck/check/method/suggest.rs index c480febdec66..53da9e19ee0c 100644 --- a/src/librustc_typeck/check/method/suggest.rs +++ b/src/librustc_typeck/check/method/suggest.rs @@ -312,8 +312,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } MethodError::PrivateMatch(def) => { - let msg = format!("{} `{}` is private", def.kind_name(), item_name); - self.tcx.sess.span_err(span, &msg); + struct_span_err!(self.tcx.sess, span, E0624, + "{} `{}` is private", def.kind_name(), item_name).emit(); } MethodError::IllegalSizedBound(candidates) => { diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index 3037e8d4a160..ab1c098fd024 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -4644,6 +4644,62 @@ whose implementation is handled specially by the compiler. In order to fix this error, just declare a function. "##, +E0624: r##" +A private item was used outside of its scope. + +Erroneous code example: + +```compile_fail,E0624 +mod inner { + pub struct Foo; + + impl Foo { + fn method(&self) {} + } +} + +let foo = inner::Foo; +foo.method(); // error: method `method` is private +``` + +Two possibilities are available to solve this issue: + +1. Only use the item in the scope it has been defined: + +``` +mod inner { + pub struct Foo; + + impl Foo { + fn method(&self) {} + } + + pub fn call_method(foo: &Foo) { // We create a public function. + foo.method(); // Which calls the item. + } +} + +let foo = inner::Foo; +inner::call_method(&foo); // And since the function is public, we can call the + // method through it. +``` + +2. Make the item public: + +``` +mod inner { + pub struct Foo; + + impl Foo { + pub fn method(&self) {} // It's now public. + } +} + +let foo = inner::Foo; +foo.method(); // Ok! +``` +"##, + } register_diagnostics! 
{ diff --git a/src/test/compile-fail/E0624.rs b/src/test/compile-fail/E0624.rs new file mode 100644 index 000000000000..952e0b31c4ca --- /dev/null +++ b/src/test/compile-fail/E0624.rs @@ -0,0 +1,22 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +mod inner { + pub struct Foo; + + impl Foo { + fn method(&self) {} + } +} + +fn main() { + let foo = inner::Foo; + foo.method(); //~ ERROR method `method` is private [E0624] +} From 17d5f6a0861a44c6aeebf0f7c1d007fdd94c79aa Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Mon, 7 Aug 2017 12:16:04 +1200 Subject: [PATCH 207/213] update rls --- src/Cargo.lock | 6 +++--- src/tools/rls | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Cargo.lock b/src/Cargo.lock index 38ce7850453f..398b23061ead 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1140,7 +1140,7 @@ dependencies = [ "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "racer 2.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-analysis 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1155,7 +1155,7 @@ dependencies = [ [[package]] name = "rls-analysis" -version = "0.6.0" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "derive-new 0.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -2229,7 +2229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" "checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" -"checksum rls-analysis 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca5b4d890953b9cc60c8c97f196921d02edf75798ccab930604aa3b4f890616d" +"checksum rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d2cb40c0371765897ae428b5706bb17135705ad4f6d1b8b6afbaabcf8c9b5cff" "checksum rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "11d339f1888e33e74d8032de0f83c40b2bdaaaf04a8cfc03b32186c3481fb534" "checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a" "checksum rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd34691a510938bb67fe0444fb363103c73ffb31c121d1e16bc92d8945ea8ff" diff --git a/src/tools/rls b/src/tools/rls index cb8a5900fd3b..5d4bbd9052fe 160000 --- a/src/tools/rls +++ b/src/tools/rls @@ -1 +1 @@ -Subproject commit cb8a5900fd3b5907b2bac07ca9832f91fed29750 +Subproject commit 5d4bbd9052fe2af849a7d017b85df98ad002c20f From 7efeade2685232ed01f65b69e6d8ad710eceb351 Mon Sep 17 00:00:00 2001 From: "Zack M. Davis" Date: Sun, 30 Jul 2017 23:22:09 -0700 Subject: [PATCH 208/213] de-orphan extended information Bizarrely, librustc_passes, librustc_plugin, librustc_mir, and libsyntax weren't getting their error explanations registered. Resolves #35284. 
--- src/librustc_driver/lib.rs | 4 ++++ src/librustc_mir/lib.rs | 2 ++ src/librustc_passes/lib.rs | 2 ++ src/librustc_plugin/lib.rs | 2 ++ src/libsyntax/lib.rs | 2 +- 5 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index d6b1eb86937b..4c337993468e 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -1207,6 +1207,10 @@ pub fn diagnostics_registry() -> errors::registry::Registry { all_errors.extend_from_slice(&rustc_trans::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_const_eval::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_passes::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_plugin::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_mir::DIAGNOSTICS); + all_errors.extend_from_slice(&syntax::DIAGNOSTICS); Registry::new(&all_errors) } diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index 912c2043390f..ea8624930e5f 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -57,3 +57,5 @@ pub fn provide(providers: &mut Providers) { shim::provide(providers); transform::provide(providers); } + +__build_diagnostic_array! { librustc_mir, DIAGNOSTICS } diff --git a/src/librustc_passes/lib.rs b/src/librustc_passes/lib.rs index 3949152e8489..ed5ea69d04ea 100644 --- a/src/librustc_passes/lib.rs +++ b/src/librustc_passes/lib.rs @@ -45,3 +45,5 @@ pub mod loops; pub mod mir_stats; pub mod no_asm; pub mod static_recursion; + +__build_diagnostic_array! { librustc_passes, DIAGNOSTICS } diff --git a/src/librustc_plugin/lib.rs b/src/librustc_plugin/lib.rs index 1de31c5d7915..e17a3c82b502 100644 --- a/src/librustc_plugin/lib.rs +++ b/src/librustc_plugin/lib.rs @@ -84,3 +84,5 @@ pub mod diagnostics; pub mod registry; pub mod load; pub mod build; + +__build_diagnostic_array! 
{ librustc_plugin, DIAGNOSTICS } diff --git a/src/libsyntax/lib.rs b/src/libsyntax/lib.rs index a8338fccb6b1..43345b02bf61 100644 --- a/src/libsyntax/lib.rs +++ b/src/libsyntax/lib.rs @@ -148,4 +148,4 @@ pub mod ext { #[cfg(test)] mod test_snippet; -// __build_diagnostic_array! { libsyntax, DIAGNOSTICS } +__build_diagnostic_array! { libsyntax, DIAGNOSTICS } From 86b7546204c4220e40ccc3b1631ad407c1357911 Mon Sep 17 00:00:00 2001 From: "Zack M. Davis" Date: Sun, 6 Aug 2017 21:36:06 -0700 Subject: [PATCH 209/213] fixing doctest failures in resurfaced extended information After repatriating error explanations to the global registry, some lurking doctest failures surfaced and needed to be chased down. Sadly, a few doctests needed to be ignored due to a not-yet-understood regression in the doctest `compile_fail` functionality (filed #43707). --- src/librustc_mir/diagnostics.rs | 14 ++++++++++--- src/librustc_passes/diagnostics.rs | 2 +- src/libsyntax/diagnostic_list.rs | 32 ++++++++++++++++++++---------- 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/src/librustc_mir/diagnostics.rs b/src/librustc_mir/diagnostics.rs index 6f3db0b388de..6530b356e33f 100644 --- a/src/librustc_mir/diagnostics.rs +++ b/src/librustc_mir/diagnostics.rs @@ -122,10 +122,8 @@ On the other hand, static and constant pointers can point either to a known numeric address or to the address of a symbol. ``` +static MY_STATIC: u32 = 42; static MY_STATIC_ADDR: &'static u32 = &MY_STATIC; -// ... and also -static MY_STATIC_ADDR2: *const u32 = &MY_STATIC; - const CONST_ADDR: *const u8 = 0x5f3759df as *const u8; ``` @@ -160,6 +158,16 @@ Remember: you can't use a function call inside a const's initialization expression! 
However, you can totally use it anywhere else: ``` +enum Test { + V1 +} + +impl Test { + fn func(&self) -> i32 { + 12 + } +} + fn main() { const FOO: Test = Test::V1; diff --git a/src/librustc_passes/diagnostics.rs b/src/librustc_passes/diagnostics.rs index 464dd72e5698..907a258a12dc 100644 --- a/src/librustc_passes/diagnostics.rs +++ b/src/librustc_passes/diagnostics.rs @@ -221,7 +221,7 @@ while break {} To fix this, add a label specifying which loop is being broken out of: ``` -`foo: while break `foo {} +'foo: while break 'foo {} ``` "## } diff --git a/src/libsyntax/diagnostic_list.rs b/src/libsyntax/diagnostic_list.rs index 508feca9731f..6598ecb94448 100644 --- a/src/libsyntax/diagnostic_list.rs +++ b/src/libsyntax/diagnostic_list.rs @@ -42,7 +42,7 @@ The `inline` attribute was malformed. Erroneous code example: -```compile_fail,E0534 +```ignore (compile_fail not working here; see Issue #43707) #[inline()] // error: expected one argument pub fn something() {} @@ -80,7 +80,7 @@ An unknown argument was given to the `inline` attribute. Erroneous code example: -```compile_fail,E0535 +```ignore (compile_fail not working here; see Issue #43707) #[inline(unknown)] // error: invalid argument pub fn something() {} @@ -190,7 +190,9 @@ A literal was used in an attribute that doesn't support literals. Erroneous code example: -```compile_fail,E0565 +```ignore (compile_fail not working here; see Issue #43707) +#![feature(attr_literals)] + #[inline("always")] // error: unsupported literal pub fn something() {} ``` @@ -209,7 +211,7 @@ A file wasn't found for an out-of-line module. Erroneous code example: -```compile_fail,E0583 +```ignore (compile_fail not working here; see Issue #43707) mod file_that_doesnt_exist; // error: file not found for module fn main() {} @@ -251,23 +253,33 @@ An inclusive range was used with no end. 
Erroneous code example: ```compile_fail,E0586 -let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; -let x = &tmp[1...]; // error: inclusive range was used with no end +#![feature(inclusive_range_syntax)] + +fn main() { + let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; + let x = &tmp[1...]; // error: inclusive range was used with no end +} ``` An inclusive range needs an end in order to *include* it. If you just need a start and no end, use a non-inclusive range (with `..`): ``` -let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; -let x = &tmp[1..]; // ok! +fn main() { + let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; + let x = &tmp[1..]; // ok! +} ``` Or put an end to your inclusive range: ``` -let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; -let x = &tmp[1...3]; // ok! +#![feature(inclusive_range_syntax)] + +fn main() { + let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1]; + let x = &tmp[1...3]; // ok! +} ``` "##, From 75b7a6f1a662dab0752d189ab635580a21b06e42 Mon Sep 17 00:00:00 2001 From: "Zack M. Davis" Date: Sun, 6 Aug 2017 21:50:41 -0700 Subject: [PATCH 210/213] comment out record of now-unused error code E0563 The sole appearance of this code was deleted in 6383de15; the existing practice in these cases seems to be to comment out its mention in `register_diagnostics!`. --- src/librustc_typeck/diagnostics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index 3037e8d4a160..1323997315e7 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -4709,7 +4709,7 @@ register_diagnostics! 
{ // between structures with the same definition E0521, // redundant default implementations of trait E0533, // `{}` does not name a unit variant, unit struct or a constant - E0563, // cannot determine a type for this `impl Trait`: {} +// E0563, // cannot determine a type for this `impl Trait`: {} // removed in 6383de15 E0564, // only named lifetimes are allowed in `impl Trait`, // but `{}` was found in the type `{}` E0567, // auto traits can not have type parameters From 4e3a0b636fffdf9d514420681dc60ecbca221f42 Mon Sep 17 00:00:00 2001 From: Ariel Ben-Yehuda Date: Mon, 7 Aug 2017 15:56:43 +0300 Subject: [PATCH 211/213] rustc::middle::dataflow - visit the CFG in RPO We used to propagate bits in node-id order, which sometimes caused an excessive number of iterations, especially when macros were present. As everyone knows, visiting the CFG in RPO bounds the number of iterators by 1 plus the depth of the most deeply nested loop (times the height of the lattice, which is 1). Fixes #43704. --- src/librustc/middle/dataflow.rs | 17 ++++++-- src/librustc_data_structures/graph/mod.rs | 36 +++++++++++++++++ src/librustc_data_structures/graph/tests.rs | 43 +++++++++++++++++++++ 3 files changed, 92 insertions(+), 4 deletions(-) diff --git a/src/librustc/middle/dataflow.rs b/src/librustc/middle/dataflow.rs index f6be70900917..d394c0f0c873 100644 --- a/src/librustc/middle/dataflow.rs +++ b/src/librustc/middle/dataflow.rs @@ -22,6 +22,9 @@ use std::mem; use std::usize; use syntax::ast; use syntax::print::pprust::PrintState; + +use rustc_data_structures::graph::OUTGOING; + use util::nodemap::NodeMap; use hir; use hir::intravisit::{self, IdRange}; @@ -523,12 +526,16 @@ impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> { changed: true }; + let nodes_po = cfg.graph.nodes_in_postorder(OUTGOING, cfg.entry); let mut temp = vec![0; words_per_id]; + let mut num_passes = 0; while propcx.changed { + num_passes += 1; propcx.changed = false; propcx.reset(&mut 
temp); - propcx.walk_cfg(cfg, &mut temp); + propcx.walk_cfg(cfg, &nodes_po, &mut temp); } + debug!("finished in {} iterations", num_passes); } debug!("Dataflow result for {}:", self.analysis_name); @@ -543,12 +550,15 @@ impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> { impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> { fn walk_cfg(&mut self, cfg: &cfg::CFG, + nodes_po: &[CFGIndex], in_out: &mut [usize]) { debug!("DataFlowContext::walk_cfg(in_out={}) {}", bits_to_string(in_out), self.dfcx.analysis_name); assert!(self.dfcx.bits_per_id > 0); - cfg.graph.each_node(|node_index, node| { + // Iterate over nodes in reverse postorder + for &node_index in nodes_po.iter().rev() { + let node = cfg.graph.node(node_index); debug!("DataFlowContext::walk_cfg idx={:?} id={} begin in_out={}", node_index, node.data.id(), bits_to_string(in_out)); @@ -563,8 +573,7 @@ impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> { // Propagate state on-exit from node into its successors. 
self.propagate_bits_into_graph_successors_of(in_out, cfg, node_index); - true // continue to next node - }); + } } fn reset(&mut self, bits: &mut [usize]) { diff --git a/src/librustc_data_structures/graph/mod.rs b/src/librustc_data_structures/graph/mod.rs index f94ed6b72094..f562ae0e3b84 100644 --- a/src/librustc_data_structures/graph/mod.rs +++ b/src/librustc_data_structures/graph/mod.rs @@ -308,6 +308,42 @@ impl Graph { DepthFirstTraversal::with_start_node(self, start, direction) } + pub fn nodes_in_postorder<'a>(&'a self, + direction: Direction, + entry_node: NodeIndex) + -> Vec + { + let mut visited = BitVector::new(self.len_nodes()); + let mut stack = vec![]; + let mut result = Vec::with_capacity(self.len_nodes()); + let mut push_node = |stack: &mut Vec<_>, node: NodeIndex| { + if visited.insert(node.0) { + stack.push((node, self.adjacent_edges(node, direction))); + } + }; + + for node in Some(entry_node).into_iter() + .chain(self.enumerated_nodes().map(|(node, _)| node)) + { + push_node(&mut stack, node); + while let Some((node, mut iter)) = stack.pop() { + if let Some((_, child)) = iter.next() { + let target = child.source_or_target(direction); + // the current node needs more processing, so + // add it back to the stack + stack.push((node, iter)); + // and then push the new node + push_node(&mut stack, target); + } else { + result.push(node); + } + } + } + + assert_eq!(result.len(), self.len_nodes()); + result + } + /// Whether or not a node can be reached from itself. 
pub fn is_node_cyclic(&self, starting_node_index: NodeIndex) -> bool { // This is similar to depth traversal below, but we diff --git a/src/librustc_data_structures/graph/tests.rs b/src/librustc_data_structures/graph/tests.rs index bdefc39a61a8..b6a0d4cff5a3 100644 --- a/src/librustc_data_structures/graph/tests.rs +++ b/src/librustc_data_structures/graph/tests.rs @@ -175,3 +175,46 @@ fn is_node_cyclic_b() { let graph = create_graph_with_cycle(); assert!(graph.is_node_cyclic(NodeIndex(1))); } + +#[test] +fn nodes_in_postorder() { + let expected = vec![ + ("A", vec!["C", "E", "D", "B", "A", "F"]), + ("B", vec!["C", "E", "D", "B", "A", "F"]), + ("C", vec!["C", "E", "D", "B", "A", "F"]), + ("D", vec!["C", "E", "D", "B", "A", "F"]), + ("E", vec!["C", "E", "D", "B", "A", "F"]), + ("F", vec!["C", "E", "D", "B", "F", "A"]) + ]; + + let graph = create_graph(); + + for ((idx, node), &(node_name, ref expected)) + in graph.enumerated_nodes().zip(&expected) + { + assert_eq!(node.data, node_name); + assert_eq!(expected, + &graph.nodes_in_postorder(OUTGOING, idx) + .into_iter().map(|idx| *graph.node_data(idx)) + .collect::>()); + } + + let expected = vec![ + ("A", vec!["D", "C", "B", "A"]), + ("B", vec!["D", "C", "B", "A"]), + ("C", vec!["B", "D", "C", "A"]), + ("D", vec!["C", "B", "D", "A"]), + ]; + + let graph = create_graph_with_cycle(); + + for ((idx, node), &(node_name, ref expected)) + in graph.enumerated_nodes().zip(&expected) + { + assert_eq!(node.data, node_name); + assert_eq!(expected, + &graph.nodes_in_postorder(OUTGOING, idx) + .into_iter().map(|idx| *graph.node_data(idx)) + .collect::>()); + } +} From 94c90e79e1681cf2ff1b1ef0a641f203ccbd2b9c Mon Sep 17 00:00:00 2001 From: Oliver Middleton Date: Mon, 7 Aug 2017 16:04:46 +0100 Subject: [PATCH 212/213] rustbuild: Replace create_dir_racy with create_dir_all `create_dir_all` has since been fixed so no need for `create_dir_racy`. 
--- src/build_helper/lib.rs | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/src/build_helper/lib.rs b/src/build_helper/lib.rs index 7011261ab6c1..8b4c7f2ac317 100644 --- a/src/build_helper/lib.rs +++ b/src/build_helper/lib.rs @@ -13,7 +13,6 @@ extern crate filetime; use std::fs::File; -use std::io; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use std::{fs, env}; @@ -211,7 +210,7 @@ pub fn native_lib_boilerplate(src_name: &str, let out_dir = env::var_os("RUSTBUILD_NATIVE_DIR").unwrap_or(env::var_os("OUT_DIR").unwrap()); let out_dir = PathBuf::from(out_dir).join(out_name); - t!(create_dir_racy(&out_dir)); + t!(fs::create_dir_all(&out_dir)); if link_name.contains('=') { println!("cargo:rustc-link-lib={}", link_name); } else { @@ -260,21 +259,3 @@ fn fail(s: &str) -> ! { println!("\n\n{}\n\n", s); std::process::exit(1); } - -fn create_dir_racy(path: &Path) -> io::Result<()> { - match fs::create_dir(path) { - Ok(()) => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::NotFound => {} - Err(e) => return Err(e), - } - match path.parent() { - Some(p) => try!(create_dir_racy(p)), - None => return Err(io::Error::new(io::ErrorKind::Other, "failed to create whole tree")), - } - match fs::create_dir(path) { - Ok(()) => Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()), - Err(e) => Err(e), - } -} From 378a6188640f4e0fb2cc075d6f6062d53aadbb7a Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Sun, 6 Aug 2017 20:12:53 -0700 Subject: [PATCH 213/213] addrinfo hint in lookup_host() needs clean initialization on all platforms Fixes #43649 --- src/liblibc | 2 +- src/libstd/sys_common/net.rs | 12 ++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/src/liblibc b/src/liblibc index ec1e5ab1ef8b..2a5b50b7f7f5 160000 --- a/src/liblibc +++ b/src/liblibc @@ -1 +1 @@ -Subproject commit 
ec1e5ab1ef8baca57f8776bbebd9343572a87082 +Subproject commit 2a5b50b7f7f539a0fd201331d6c1e0534aa332f5 diff --git a/src/libstd/sys_common/net.rs b/src/libstd/sys_common/net.rs index 5775dd4f1fcd..1ca39ff9d4a1 100644 --- a/src/libstd/sys_common/net.rs +++ b/src/libstd/sys_common/net.rs @@ -165,16 +165,8 @@ pub fn lookup_host(host: &str) -> io::Result { init(); let c_host = CString::new(host)?; - let hints = c::addrinfo { - ai_flags: 0, - ai_family: 0, - ai_socktype: c::SOCK_STREAM, - ai_protocol: 0, - ai_addrlen: 0, - ai_addr: ptr::null_mut(), - ai_canonname: ptr::null_mut(), - ai_next: ptr::null_mut() - }; + let mut hints: c::addrinfo = unsafe { mem::zeroed() }; + hints.ai_socktype = c::SOCK_STREAM; let mut res = ptr::null_mut(); unsafe { match cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints, &mut res)) {