diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4cf0e5fba537..8032154a7365 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -65,7 +65,7 @@ jobs:
defaults:
run:
shell: ${{ contains(matrix.os, 'windows') && 'msys2 {0}' || 'bash' }}
- timeout-minutes: 600
+ timeout-minutes: 240
env:
CI_JOB_NAME: ${{ matrix.image }}
CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
diff --git a/Cargo.lock b/Cargo.lock
index afeb9faec097..cafc623c185a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3420,6 +3420,7 @@ version = "0.2.0"
dependencies = [
"ar",
"bstr",
+ "build_helper",
"gimli 0.28.1",
"object 0.34.0",
"regex",
@@ -3514,6 +3515,12 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5be1bdc7edf596692617627bbfeaba522131b18e06ca4df2b6b689e3c5d5ce84"
+[[package]]
+name = "rustc-stable-hash"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5c9f15eec8235d7cb775ee6f81891db79b98fd54ba1ad8fae565b88ef1ae4e2"
+
[[package]]
name = "rustc-std-workspace-alloc"
version = "1.99.0"
@@ -3852,6 +3859,7 @@ dependencies = [
"portable-atomic",
"rustc-hash",
"rustc-rayon",
+ "rustc-stable-hash",
"rustc_arena",
"rustc_graphviz",
"rustc_index",
diff --git a/INSTALL.md b/INSTALL.md
index 1c2cecf8ef9b..ded0b59fc6cd 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -215,7 +215,7 @@ python x.py build
Right now, building Rust only works with some known versions of Visual Studio.
If you have a more recent version installed and the build system doesn't
-understand, you may need to force rustbuild to use an older version.
+understand, you may need to force bootstrap to use an older version.
This can be done by manually calling the appropriate vcvars file before running
the bootstrap.
diff --git a/RELEASES.md b/RELEASES.md
index 305da6a3550e..0ecd472efb6e 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -1,3 +1,161 @@
+Version 1.80 (2024-07-25)
+==========================
+
+
+
+Language
+--------
+- [Document maximum allocation size](https://github.com/rust-lang/rust/pull/116675/)
+- [Allow zero-byte offsets and ZST read/writes on arbitrary pointers](https://github.com/rust-lang/rust/pull/117329/)
+- [Support C23's variadics without a named parameter](https://github.com/rust-lang/rust/pull/124048/)
+- [Stabilize `exclusive_range_pattern` feature](https://github.com/rust-lang/rust/pull/124459/)
+- [Guarantee layout and ABI of `Result` in some scenarios](https://github.com/rust-lang/rust/pull/124870)
+
+
+
+Compiler
+--------
+- [Update cc crate to v1.0.97 allowing additional spectre mitigations on MSVC targets](https://github.com/rust-lang/rust/pull/124892/)
+- [Allow field reordering on types marked `repr(packed(1))`](https://github.com/rust-lang/rust/pull/125360/)
+- [Add a lint against never type fallback affecting unsafe code](https://github.com/rust-lang/rust/pull/123939/)
+- [Disallow cast with trailing braced macro in let-else](https://github.com/rust-lang/rust/pull/125049/)
+- [Expand `for_loops_over_fallibles` lint to lint on fallibles behind references.](https://github.com/rust-lang/rust/pull/125156/)
+- [self-contained linker: retry linking without `-fuse-ld=lld` on CCs that don't support it](https://github.com/rust-lang/rust/pull/125417/)
+- [Do not parse CVarArgs (`...`) as a type in trait bounds](https://github.com/rust-lang/rust/pull/125863/)
+- Improvements to LLDB formatting [#124458](https://github.com/rust-lang/rust/pull/124458) [#124500](https://github.com/rust-lang/rust/pull/124500)
+- [For the wasm32-wasip2 target default to PIC and do not use `-fuse-ld=lld`](https://github.com/rust-lang/rust/pull/124858/)
+- [Add x86_64-unknown-linux-none as a tier 3 target](https://github.com/rust-lang/rust/pull/125023/)
+- [Lint on `foo.into_iter()` resolving to `&Box<[T]>: IntoIterator`](https://github.com/rust-lang/rust/pull/124097/)
+
+
+
+Libraries
+---------
+- [Add `size_of` and `size_of_val` and `align_of` and `align_of_val` to the prelude](https://github.com/rust-lang/rust/pull/123168/)
+- [Abort a process when FD ownership is violated](https://github.com/rust-lang/rust/pull/124210/)
+- [io::Write::write_fmt: panic if the formatter fails when the stream does not fail](https://github.com/rust-lang/rust/pull/125012/)
+- [Panic if `PathBuf::set_extension` would add a path separator](https://github.com/rust-lang/rust/pull/125070/)
+- [Add assert_unsafe_precondition to unchecked_{add,sub,neg,mul,shl,shr} methods](https://github.com/rust-lang/rust/pull/121571/)
+- [Update `c_char` on AIX to use the correct type](https://github.com/rust-lang/rust/pull/122986/)
+- [`offset_of!` no longer returns a temporary](https://github.com/rust-lang/rust/pull/124484/)
+- [Handle sigma in `str.to_lowercase` correctly](https://github.com/rust-lang/rust/pull/124773/)
+- [Raise `DEFAULT_MIN_STACK_SIZE` to at least 64KiB](https://github.com/rust-lang/rust/pull/126059/)
+
+
+
+Stabilized APIs
+---------------
+- [`impl Default for Rc<CStr>`](https://doc.rust-lang.org/beta/alloc/rc/struct.Rc.html#impl-Default-for-Rc%3CCStr%3E)
+- [`impl Default for Rc<str>`](https://doc.rust-lang.org/beta/alloc/rc/struct.Rc.html#impl-Default-for-Rc%3Cstr%3E)
+- [`impl Default for Rc<[T]>`](https://doc.rust-lang.org/beta/alloc/rc/struct.Rc.html#impl-Default-for-Rc%3C%5BT%5D%3E)
+- [`impl Default for Arc<str>`](https://doc.rust-lang.org/beta/alloc/sync/struct.Arc.html#impl-Default-for-Arc%3Cstr%3E)
+- [`impl Default for Arc<CStr>`](https://doc.rust-lang.org/beta/alloc/sync/struct.Arc.html#impl-Default-for-Arc%3CCStr%3E)
+- [`impl Default for Arc<[T]>`](https://doc.rust-lang.org/beta/alloc/sync/struct.Arc.html#impl-Default-for-Arc%3C%5BT%5D%3E)
+- [`impl IntoIterator for Box<[T]>`](https://doc.rust-lang.org/beta/alloc/boxed/struct.Box.html#impl-IntoIterator-for-Box%3C%5BI%5D,+A%3E)
+- [`impl FromIterator<String> for Box<str>`](https://doc.rust-lang.org/beta/alloc/boxed/struct.Box.html#impl-FromIterator%3CString%3E-for-Box%3Cstr%3E)
+- [`impl FromIterator<char> for Box<str>`](https://doc.rust-lang.org/beta/alloc/boxed/struct.Box.html#impl-FromIterator%3Cchar%3E-for-Box%3Cstr%3E)
+- [`LazyCell`](https://doc.rust-lang.org/beta/core/cell/struct.LazyCell.html)
+- [`LazyLock`](https://doc.rust-lang.org/beta/std/sync/struct.LazyLock.html)
+- [`Duration::div_duration_f32`](https://doc.rust-lang.org/beta/std/time/struct.Duration.html#method.div_duration_f32)
+- [`Duration::div_duration_f64`](https://doc.rust-lang.org/beta/std/time/struct.Duration.html#method.div_duration_f64)
+- [`Option::take_if`](https://doc.rust-lang.org/beta/std/option/enum.Option.html#method.take_if)
+- [`Seek::seek_relative`](https://doc.rust-lang.org/beta/std/io/trait.Seek.html#method.seek_relative)
+- [`BinaryHeap::as_slice`](https://doc.rust-lang.org/beta/std/collections/struct.BinaryHeap.html#method.as_slice)
+- [`NonNull::offset`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.offset)
+- [`NonNull::byte_offset`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.byte_offset)
+- [`NonNull::add`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.add)
+- [`NonNull::byte_add`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.byte_add)
+- [`NonNull::sub`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.sub)
+- [`NonNull::byte_sub`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.byte_sub)
+- [`NonNull::offset_from`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.offset_from)
+- [`NonNull::byte_offset_from`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.byte_offset_from)
+- [`NonNull::read`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.read)
+- [`NonNull::read_volatile`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.read_volatile)
+- [`NonNull::read_unaligned`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.read_unaligned)
+- [`NonNull::write`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.write)
+- [`NonNull::write_volatile`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.write_volatile)
+- [`NonNull::write_unaligned`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.write_unaligned)
+- [`NonNull::write_bytes`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.write_bytes)
+- [`NonNull::copy_to`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.copy_to)
+- [`NonNull::copy_to_nonoverlapping`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.copy_to_nonoverlapping)
+- [`NonNull::copy_from`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.copy_from)
+- [`NonNull::copy_from_nonoverlapping`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.copy_from_nonoverlapping)
+- [`NonNull::replace`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.replace)
+- [`NonNull::swap`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.swap)
+- [`NonNull::drop_in_place`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.drop_in_place)
+- [`NonNull::align_offset`](https://doc.rust-lang.org/beta/std/ptr/struct.NonNull.html#method.align_offset)
+- [`<[T]>::split_at_checked`](https://doc.rust-lang.org/beta/std/primitive.slice.html#method.split_at_checked)
+- [`<[T]>::split_at_mut_checked`](https://doc.rust-lang.org/beta/std/primitive.slice.html#method.split_at_mut_checked)
+- [`str::split_at_checked`](https://doc.rust-lang.org/beta/std/primitive.str.html#method.split_at_checked)
+- [`str::split_at_mut_checked`](https://doc.rust-lang.org/beta/std/primitive.str.html#method.split_at_mut_checked)
+- [`str::trim_ascii`](https://doc.rust-lang.org/beta/std/primitive.str.html#method.trim_ascii)
+- [`str::trim_ascii_start`](https://doc.rust-lang.org/beta/std/primitive.str.html#method.trim_ascii_start)
+- [`str::trim_ascii_end`](https://doc.rust-lang.org/beta/std/primitive.str.html#method.trim_ascii_end)
+- [`<[u8]>::trim_ascii`](https://doc.rust-lang.org/beta/core/primitive.slice.html#method.trim_ascii)
+- [`<[u8]>::trim_ascii_start`](https://doc.rust-lang.org/beta/core/primitive.slice.html#method.trim_ascii_start)
+- [`<[u8]>::trim_ascii_end`](https://doc.rust-lang.org/beta/core/primitive.slice.html#method.trim_ascii_end)
+- [`Ipv4Addr::BITS`](https://doc.rust-lang.org/beta/core/net/struct.Ipv4Addr.html#associatedconstant.BITS)
+- [`Ipv4Addr::to_bits`](https://doc.rust-lang.org/beta/core/net/struct.Ipv4Addr.html#method.to_bits)
+- [`Ipv4Addr::from_bits`](https://doc.rust-lang.org/beta/core/net/struct.Ipv4Addr.html#method.from_bits)
+- [`Ipv6Addr::BITS`](https://doc.rust-lang.org/beta/core/net/struct.Ipv6Addr.html#associatedconstant.BITS)
+- [`Ipv6Addr::to_bits`](https://doc.rust-lang.org/beta/core/net/struct.Ipv6Addr.html#method.to_bits)
+- [`Ipv6Addr::from_bits`](https://doc.rust-lang.org/beta/core/net/struct.Ipv6Addr.html#method.from_bits)
+- [`Vec::<[T; N]>::into_flattened`](https://doc.rust-lang.org/beta/alloc/vec/struct.Vec.html#method.into_flattened)
+- [`<[[T; N]]>::as_flattened`](https://doc.rust-lang.org/beta/core/primitive.slice.html#method.as_flattened)
+- [`<[[T; N]]>::as_flattened_mut`](https://doc.rust-lang.org/beta/core/primitive.slice.html#method.as_flattened_mut)
+
+These APIs are now stable in const contexts:
+
+- [`<[T]>::last_chunk`](https://doc.rust-lang.org/beta/core/primitive.slice.html#method.last_chunk)
+- [`BinaryHeap::new`](https://doc.rust-lang.org/beta/std/collections/struct.BinaryHeap.html#method.new)
+
+
+
+Cargo
+-----
+- [Stabilize `-Zcheck-cfg` as always enabled](https://github.com/rust-lang/cargo/pull/13571/)
+- [Warn, rather than fail publish, if a target is excluded](https://github.com/rust-lang/cargo/pull/13713/)
+- [Add special `check-cfg` lint config for the `unexpected_cfgs` lint](https://github.com/rust-lang/cargo/pull/13913/)
+- [Stabilize `cargo update --precise <yanked>`](https://github.com/rust-lang/cargo/pull/13974/)
+- [Don't change file permissions on `Cargo.toml` when using `cargo add`](https://github.com/rust-lang/cargo/pull/13898/)
+- [Support using `cargo fix` on IPv6-only networks](https://github.com/rust-lang/cargo/pull/13907/)
+
+
+
+Rustdoc
+-------
+
+- [Allow searching for references](https://github.com/rust-lang/rust/pull/124148/)
+- [Stabilize `custom_code_classes_in_docs` feature](https://github.com/rust-lang/rust/pull/124577/)
+- [fix: In cross-crate scenarios show enum variants on type aliases of enums](https://github.com/rust-lang/rust/pull/125300/)
+
+
+
+Compatibility Notes
+-------------------
+- [rustfmt estimates line lengths differently when using non-ascii characters](https://github.com/rust-lang/rustfmt/issues/6203)
+- [Type aliases are now handled correctly in orphan check](https://github.com/rust-lang/rust/pull/117164/)
+- [Allow instructing rustdoc to read from stdin via `-`](https://github.com/rust-lang/rust/pull/124611/)
+- [`std::env::{set_var, remove_var}` can no longer be converted to safe function pointers and no longer implement the `Fn` family of traits](https://github.com/rust-lang/rust/pull/124636)
+- [Warn (or error) when `Self` constructor from outer item is referenced in inner nested item](https://github.com/rust-lang/rust/pull/124187/)
+- [Turn `indirect_structural_match` and `pointer_structural_match` lints into hard errors](https://github.com/rust-lang/rust/pull/124661/)
+- [Make `where_clause_object_safety` lint a regular object safety violation](https://github.com/rust-lang/rust/pull/125380/)
+- [Turn `proc_macro_back_compat` lint into a hard error.](https://github.com/rust-lang/rust/pull/125596/)
+- [Detect unused structs even when implementing private traits](https://github.com/rust-lang/rust/pull/122382/)
+- [`std::sync::ReentrantLockGuard` is no longer `Sync` if `T: !Sync`](https://github.com/rust-lang/rust/pull/125527) which means [`std::io::StdoutLock` and `std::io::StderrLock` are no longer Sync](https://github.com/rust-lang/rust/issues/127340)
+
+
+
+Internal Changes
+----------------
+
+These changes do not affect any public interfaces of Rust, but they represent
+significant improvements to the performance or internals of rustc and related
+tools.
+
+- Misc improvements to size of generated html by rustdoc e.g. [#124738](https://github.com/rust-lang/rust/pull/124738/) and [#123734](https://github.com/rust-lang/rust/pull/123734/)
+- [MSVC targets no longer depend on libc](https://github.com/rust-lang/rust/pull/124050/)
+
Version 1.79.0 (2024-06-13)
==========================
diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
index 088ae9ba4410..d2c7b1c0753d 100644
--- a/compiler/rustc_ast/src/attr/mod.rs
+++ b/compiler/rustc_ast/src/attr/mod.rs
@@ -202,21 +202,18 @@ impl Attribute {
}
}
- // Named `get_tokens` to distinguish it from the `::tokens` method.
- pub fn get_tokens(&self) -> TokenStream {
- match &self.kind {
- AttrKind::Normal(normal) => TokenStream::new(
- normal
- .tokens
- .as_ref()
- .unwrap_or_else(|| panic!("attribute is missing tokens: {self:?}"))
- .to_attr_token_stream()
- .to_token_trees(),
- ),
- &AttrKind::DocComment(comment_kind, data) => TokenStream::token_alone(
+ pub fn token_trees(&self) -> Vec<TokenTree> {
+ match self.kind {
+ AttrKind::Normal(ref normal) => normal
+ .tokens
+ .as_ref()
+ .unwrap_or_else(|| panic!("attribute is missing tokens: {self:?}"))
+ .to_attr_token_stream()
+ .to_token_trees(),
+ AttrKind::DocComment(comment_kind, data) => vec![TokenTree::token_alone(
token::DocComment(comment_kind, self.style, data),
self.span,
- ),
+ )],
}
}
}
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index ee068f19332a..a92ef575777c 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -16,7 +16,7 @@
use crate::ast::{AttrStyle, StmtKind};
use crate::ast_traits::{HasAttrs, HasTokens};
use crate::token::{self, Delimiter, Nonterminal, Token, TokenKind};
-use crate::AttrVec;
+use crate::{AttrVec, Attribute};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{self, Lrc};
@@ -179,11 +179,10 @@ impl AttrTokenStream {
AttrTokenStream(Lrc::new(tokens))
}
- /// Converts this `AttrTokenStream` to a plain `Vec<TokenTree>`.
- /// During conversion, `AttrTokenTree::AttrsTarget` get 'flattened'
- /// back to a `TokenStream` of the form `outer_attr attr_target`.
- /// If there are inner attributes, they are inserted into the proper
- /// place in the attribute target tokens.
+ /// Converts this `AttrTokenStream` to a plain `Vec<TokenTree>`. During
+ /// conversion, any `AttrTokenTree::AttrsTarget` gets "flattened" back to a
+ /// `TokenStream`, as described in the comment on
+ /// `attrs_and_tokens_to_token_trees`.
pub fn to_token_trees(&self) -> Vec<TokenTree> {
let mut res = Vec::with_capacity(self.0.len());
for tree in self.0.iter() {
@@ -200,51 +199,7 @@ impl AttrTokenStream {
))
}
AttrTokenTree::AttrsTarget(target) => {
- let idx = target
- .attrs
- .partition_point(|attr| matches!(attr.style, crate::AttrStyle::Outer));
- let (outer_attrs, inner_attrs) = target.attrs.split_at(idx);
-
- let mut target_tokens = target.tokens.to_attr_token_stream().to_token_trees();
- if !inner_attrs.is_empty() {
- let mut found = false;
- // Check the last two trees (to account for a trailing semi)
- for tree in target_tokens.iter_mut().rev().take(2) {
- if let TokenTree::Delimited(span, spacing, delim, delim_tokens) = tree {
- // Inner attributes are only supported on extern blocks, functions,
- // impls, and modules. All of these have their inner attributes
- // placed at the beginning of the rightmost outermost braced group:
- // e.g. fn foo() { #![my_attr] }
- //
- // Therefore, we can insert them back into the right location
- // without needing to do any extra position tracking.
- //
- // Note: Outline modules are an exception - they can
- // have attributes like `#![my_attr]` at the start of a file.
- // Support for custom attributes in this position is not
- // properly implemented - we always synthesize fake tokens,
- // so we never reach this code.
-
- let mut stream = TokenStream::default();
- for inner_attr in inner_attrs {
- stream.push_stream(inner_attr.get_tokens());
- }
- stream.push_stream(delim_tokens.clone());
- *tree = TokenTree::Delimited(*span, *spacing, *delim, stream);
- found = true;
- break;
- }
- }
-
- assert!(
- found,
- "Failed to find trailing delimited group in: {target_tokens:?}"
- );
- }
- for attr in outer_attrs {
- res.extend(attr.get_tokens().0.iter().cloned());
- }
- res.extend(target_tokens);
+ attrs_and_tokens_to_token_trees(&target.attrs, &target.tokens, &mut res);
}
}
}
@@ -252,15 +207,76 @@ impl AttrTokenStream {
}
}
+// Converts multiple attributes and the tokens for a target AST node into token trees, and appends
+// them to `res`.
+//
+// Example: if the AST node is "fn f() { blah(); }", then:
+// - Simple if no attributes are present, e.g. "fn f() { blah(); }"
+// - Simple if only outer attributes are present, e.g. "#[outer1] #[outer2] fn f() { blah(); }"
+// - Trickier if inner attributes are present, because they must be moved within the AST node's
+// tokens, e.g. "#[outer] fn f() { #![inner] blah() }"
+fn attrs_and_tokens_to_token_trees(
+ attrs: &[Attribute],
+ target_tokens: &LazyAttrTokenStream,
+ res: &mut Vec<TokenTree>,
+) {
+ let idx = attrs.partition_point(|attr| matches!(attr.style, crate::AttrStyle::Outer));
+ let (outer_attrs, inner_attrs) = attrs.split_at(idx);
+
+ // Add outer attribute tokens.
+ for attr in outer_attrs {
+ res.extend(attr.token_trees());
+ }
+
+ // Add target AST node tokens.
+ res.extend(target_tokens.to_attr_token_stream().to_token_trees());
+
+ // Insert inner attribute tokens.
+ if !inner_attrs.is_empty() {
+ let mut found = false;
+ // Check the last two trees (to account for a trailing semi)
+ for tree in res.iter_mut().rev().take(2) {
+ if let TokenTree::Delimited(span, spacing, delim, delim_tokens) = tree {
+ // Inner attributes are only supported on extern blocks, functions,
+ // impls, and modules. All of these have their inner attributes
+ // placed at the beginning of the rightmost outermost braced group:
+ // e.g. fn foo() { #![my_attr] }
+ //
+ // Therefore, we can insert them back into the right location
+ // without needing to do any extra position tracking.
+ //
+ // Note: Outline modules are an exception - they can
+ // have attributes like `#![my_attr]` at the start of a file.
+ // Support for custom attributes in this position is not
+ // properly implemented - we always synthesize fake tokens,
+ // so we never reach this code.
+ let mut tts = vec![];
+ for inner_attr in inner_attrs {
+ tts.extend(inner_attr.token_trees());
+ }
+ tts.extend(delim_tokens.0.iter().cloned());
+ let stream = TokenStream::new(tts);
+ *tree = TokenTree::Delimited(*span, *spacing, *delim, stream);
+ found = true;
+ break;
+ }
+ }
+ assert!(found, "Failed to find trailing delimited group in: {res:?}");
+ }
+}
+
/// Stores the tokens for an attribute target, along
/// with its attributes.
///
/// This is constructed during parsing when we need to capture
-/// tokens.
+/// tokens, for `cfg` and `cfg_attr` attributes.
///
/// For example, `#[cfg(FALSE)] struct Foo {}` would
/// have an `attrs` field containing the `#[cfg(FALSE)]` attr,
/// and a `tokens` field storing the (unparsed) tokens `struct Foo {}`
+///
+/// The `cfg`/`cfg_attr` processing occurs in
+/// `StripUnconfigured::configure_tokens`.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct AttrsTarget {
/// Attributes, both outer and inner.
@@ -437,18 +453,10 @@ impl TokenStream {
}
pub fn from_ast(node: &(impl HasAttrs + HasTokens + fmt::Debug)) -> TokenStream {
- let Some(tokens) = node.tokens() else {
- panic!("missing tokens for node: {:?}", node);
- };
- let attrs = node.attrs();
- let attr_stream = if attrs.is_empty() {
- tokens.to_attr_token_stream()
- } else {
- let target =
- AttrsTarget { attrs: attrs.iter().cloned().collect(), tokens: tokens.clone() };
- AttrTokenStream::new(vec![AttrTokenTree::AttrsTarget(target)])
- };
- TokenStream::new(attr_stream.to_token_trees())
+ let tokens = node.tokens().unwrap_or_else(|| panic!("missing tokens for node: {:?}", node));
+ let mut tts = vec![];
+ attrs_and_tokens_to_token_trees(node.attrs(), tokens, &mut tts);
+ TokenStream::new(tts)
}
pub fn from_nonterminal_ast(nt: &Nonterminal) -> TokenStream {
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 9cf3182daea5..2178b65727d0 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -607,8 +607,7 @@ fn maybe_stage_features(sess: &Session, features: &Features, krate: &ast::Crate)
// does not check the same for lib features unless there's at least one
// declared lang feature
if !sess.opts.unstable_features.is_nightly_build() {
- let lang_features = &features.declared_lang_features;
- if lang_features.len() == 0 {
+ if features.declared_features.is_empty() {
return;
}
for attr in krate.attrs.iter().filter(|attr| attr.has_name(sym::feature)) {
@@ -624,7 +623,8 @@ fn maybe_stage_features(sess: &Session, features: &Features, krate: &ast::Crate)
attr.meta_item_list().into_iter().flatten().flat_map(|nested| nested.ident())
{
let name = ident.name;
- let stable_since = lang_features
+ let stable_since = features
+ .declared_lang_features
.iter()
.flat_map(|&(feature, _, since)| if feature == name { since } else { None })
.next();
diff --git a/compiler/rustc_borrowck/src/dataflow.rs b/compiler/rustc_borrowck/src/dataflow.rs
index 00a30dc2240a..59b3c6916cbd 100644
--- a/compiler/rustc_borrowck/src/dataflow.rs
+++ b/compiler/rustc_borrowck/src/dataflow.rs
@@ -553,7 +553,7 @@ impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, '_, 'tcx> {
panic!("could not find BorrowIndex for location {location:?}");
});
- trans.gen(index);
+ trans.gen_(index);
}
// Make sure there are no remaining borrows for variables
diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
index b1e302e5e27d..c7f6840e401c 100644
--- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
@@ -4,7 +4,7 @@
#![allow(rustc::untranslatable_diagnostic)]
use either::Either;
-use hir::ClosureKind;
+use hir::{ClosureKind, Path};
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::{codes::*, struct_span_code_err, Applicability, Diag, MultiSpan};
@@ -16,6 +16,7 @@ use rustc_hir::{CoroutineKind, CoroutineSource, LangItem};
use rustc_middle::bug;
use rustc_middle::hir::nested_filter::OnlyBodies;
use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::mir::VarDebugInfoContents;
use rustc_middle::mir::{
self, AggregateKind, BindingForm, BorrowKind, CallSource, ClearCrossCrate, ConstraintCategory,
FakeBorrowKind, FakeReadCause, LocalDecl, LocalInfo, LocalKind, Location, MutBorrowKind,
@@ -445,6 +446,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, '_, 'infcx, 'tcx> {
} else {
(None, &[][..], 0)
};
+ let mut can_suggest_clone = true;
if let Some(def_id) = def_id
&& let node = self.infcx.tcx.hir_node_by_def_id(def_id)
&& let Some(fn_sig) = node.fn_sig()
@@ -452,24 +454,73 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, '_, 'infcx, 'tcx> {
&& let Some(pos) = args.iter().position(|arg| arg.hir_id == expr.hir_id)
&& let Some(arg) = fn_sig.decl.inputs.get(pos + offset)
{
- let mut span: MultiSpan = arg.span.into();
- span.push_span_label(
- arg.span,
- "this parameter takes ownership of the value".to_string(),
- );
- let descr = match node.fn_kind() {
- Some(hir::intravisit::FnKind::ItemFn(..)) | None => "function",
- Some(hir::intravisit::FnKind::Method(..)) => "method",
- Some(hir::intravisit::FnKind::Closure) => "closure",
- };
- span.push_span_label(ident.span, format!("in this {descr}"));
- err.span_note(
- span,
- format!(
- "consider changing this parameter type in {descr} `{ident}` to borrow \
- instead if owning the value isn't necessary",
- ),
- );
+ let mut is_mut = false;
+ if let hir::TyKind::Path(hir::QPath::Resolved(None, path)) = arg.kind
+ && let Res::Def(DefKind::TyParam, param_def_id) = path.res
+ && self
+ .infcx
+ .tcx
+ .predicates_of(def_id)
+ .instantiate_identity(self.infcx.tcx)
+ .predicates
+ .into_iter()
+ .any(|pred| {
+ if let ty::ClauseKind::Trait(predicate) = pred.kind().skip_binder()
+ && [
+ self.infcx.tcx.get_diagnostic_item(sym::AsRef),
+ self.infcx.tcx.get_diagnostic_item(sym::AsMut),
+ self.infcx.tcx.get_diagnostic_item(sym::Borrow),
+ self.infcx.tcx.get_diagnostic_item(sym::BorrowMut),
+ ]
+ .contains(&Some(predicate.def_id()))
+ && let ty::Param(param) = predicate.self_ty().kind()
+ && let generics = self.infcx.tcx.generics_of(def_id)
+ && let param = generics.type_param(*param, self.infcx.tcx)
+ && param.def_id == param_def_id
+ {
+ if [
+ self.infcx.tcx.get_diagnostic_item(sym::AsMut),
+ self.infcx.tcx.get_diagnostic_item(sym::BorrowMut),
+ ]
+ .contains(&Some(predicate.def_id()))
+ {
+ is_mut = true;
+ }
+ true
+ } else {
+ false
+ }
+ })
+ {
+ // The type of the argument corresponding to the expression that got moved
+ // is a type parameter `T`, which has a `T: AsRef` obligation.
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ "borrow the value to avoid moving it",
+ format!("&{}", if is_mut { "mut " } else { "" }),
+ Applicability::MachineApplicable,
+ );
+ can_suggest_clone = is_mut;
+ } else {
+ let mut span: MultiSpan = arg.span.into();
+ span.push_span_label(
+ arg.span,
+ "this parameter takes ownership of the value".to_string(),
+ );
+ let descr = match node.fn_kind() {
+ Some(hir::intravisit::FnKind::ItemFn(..)) | None => "function",
+ Some(hir::intravisit::FnKind::Method(..)) => "method",
+ Some(hir::intravisit::FnKind::Closure) => "closure",
+ };
+ span.push_span_label(ident.span, format!("in this {descr}"));
+ err.span_note(
+ span,
+ format!(
+ "consider changing this parameter type in {descr} `{ident}` to \
+ borrow instead if owning the value isn't necessary",
+ ),
+ );
+ }
}
let place = &self.move_data.move_paths[mpi].place;
let ty = place.ty(self.body, self.infcx.tcx).ty;
@@ -487,15 +538,23 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, '_, 'infcx, 'tcx> {
ClosureKind::Coroutine(CoroutineKind::Desugared(_, CoroutineSource::Block)),
..
} = move_spans
+ && can_suggest_clone
{
self.suggest_cloning(err, ty, expr, None, Some(move_spans));
- } else if self.suggest_hoisting_call_outside_loop(err, expr) {
+ } else if self.suggest_hoisting_call_outside_loop(err, expr) && can_suggest_clone {
// The place where the type moves would be misleading to suggest clone.
// #121466
self.suggest_cloning(err, ty, expr, None, Some(move_spans));
}
}
- if let Some(pat) = finder.pat {
+
+ self.suggest_ref_for_dbg_args(expr, place, move_span, err);
+
+ // it's useless to suggest inserting `ref` when the span doesn't come from local code
+ if let Some(pat) = finder.pat
+ && !move_span.is_dummy()
+ && !self.infcx.tcx.sess.source_map().is_imported(move_span)
+ {
*in_pattern = true;
let mut sugg = vec![(pat.span.shrink_to_lo(), "ref ".to_string())];
if let Some(pat) = finder.parent_pat {
@@ -510,6 +569,59 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, '_, 'infcx, 'tcx> {
}
}
+ // for dbg!(x) which may take ownership, suggest dbg!(&x) instead
+ // but here we actually do not check whether the macro name is `dbg!`
+ // so that we may extend the scope a bit larger to cover more cases
+ fn suggest_ref_for_dbg_args(
+ &self,
+ body: &hir::Expr<'_>,
+ place: &Place<'tcx>,
+ move_span: Span,
+ err: &mut Diag<'infcx>,
+ ) {
+ let var_info = self.body.var_debug_info.iter().find(|info| match info.value {
+ VarDebugInfoContents::Place(ref p) => p == place,
+ _ => false,
+ });
+ let arg_name = if let Some(var_info) = var_info {
+ var_info.name
+ } else {
+ return;
+ };
+ struct MatchArgFinder {
+ expr_span: Span,
+ match_arg_span: Option<Span>,
+ arg_name: Symbol,
+ }
+ impl Visitor<'_> for MatchArgFinder {
+ fn visit_expr(&mut self, e: &hir::Expr<'_>) {
+ // dbg! is expanded into a match pattern, we need to find the right argument span
+ if let hir::ExprKind::Match(expr, ..) = &e.kind
+ && let hir::ExprKind::Path(hir::QPath::Resolved(
+ _,
+ path @ Path { segments: [seg], .. },
+ )) = &expr.kind
+ && seg.ident.name == self.arg_name
+ && self.expr_span.source_callsite().contains(expr.span)
+ {
+ self.match_arg_span = Some(path.span);
+ }
+ hir::intravisit::walk_expr(self, e);
+ }
+ }
+
+ let mut finder = MatchArgFinder { expr_span: move_span, match_arg_span: None, arg_name };
+ finder.visit_expr(body);
+ if let Some(macro_arg_span) = finder.match_arg_span {
+ err.span_suggestion_verbose(
+ macro_arg_span.shrink_to_lo(),
+ "consider borrowing instead of transferring ownership",
+ "&",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+
fn report_use_of_uninitialized(
&self,
mpi: MovePathIndex,
diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs
index d258c68b9598..9ad941dabbe6 100644
--- a/compiler/rustc_borrowck/src/lib.rs
+++ b/compiler/rustc_borrowck/src/lib.rs
@@ -821,6 +821,8 @@ impl<'a, 'mir, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'mir, 'tcx, R>
| TerminatorKind::Return
| TerminatorKind::TailCall { .. }
| TerminatorKind::CoroutineDrop => {
+ // Returning from the function implicitly kills storage for all locals and statics.
+ // Often, the storage will already have been killed by an explicit
// StorageDead, but we don't always emit those (notably on unwind paths),
// so this "extra check" serves as a kind of backup.
let borrow_set = self.borrow_set.clone();
diff --git a/compiler/rustc_codegen_cranelift/Cargo.lock b/compiler/rustc_codegen_cranelift/Cargo.lock
index 15c9e9d66fac..efec5db836bb 100644
--- a/compiler/rustc_codegen_cranelift/Cargo.lock
+++ b/compiler/rustc_codegen_cranelift/Cargo.lock
@@ -16,9 +16,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.82"
+version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
+checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "arbitrary"
@@ -67,7 +67,7 @@ dependencies = [
"cranelift-entity",
"cranelift-isle",
"gimli",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.5",
"log",
"regalloc2",
"rustc-hash",
@@ -182,9 +182,9 @@ dependencies = [
[[package]]
name = "crc32fast"
-version = "1.4.0"
+version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
+checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
dependencies = [
"cfg-if",
]
@@ -203,9 +203,9 @@ checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"
[[package]]
name = "gimli"
-version = "0.28.0"
+version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
+checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
dependencies = [
"fallible-iterator",
"indexmap",
@@ -223,9 +223,9 @@ dependencies = [
[[package]]
name = "hashbrown"
-version = "0.14.3"
+version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
+checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
dependencies = [
"ahash",
]
@@ -237,20 +237,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26"
dependencies = [
"equivalent",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.5",
]
[[package]]
name = "libc"
-version = "0.2.153"
+version = "0.2.155"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
+checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
[[package]]
name = "libloading"
-version = "0.8.3"
+version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19"
+checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d"
dependencies = [
"cfg-if",
"windows-targets",
@@ -258,9 +258,9 @@ dependencies = [
[[package]]
name = "log"
-version = "0.4.21"
+version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "mach"
@@ -273,9 +273,9 @@ dependencies = [
[[package]]
name = "memchr"
-version = "2.7.2"
+version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "object"
@@ -284,7 +284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce"
dependencies = [
"crc32fast",
- "hashbrown 0.14.3",
+ "hashbrown 0.14.5",
"indexmap",
"memchr",
]
@@ -297,9 +297,9 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "proc-macro2"
-version = "1.0.81"
+version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
dependencies = [
"unicode-ident",
]
@@ -382,9 +382,9 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "syn"
-version = "2.0.60"
+version = "2.0.70"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3"
+checksum = "2f0209b68b3613b093e0ec905354eccaedcfe83b8cb37cbdeae64026c3064c16"
dependencies = [
"proc-macro2",
"quote",
@@ -393,9 +393,9 @@ dependencies = [
[[package]]
name = "target-lexicon"
-version = "0.12.14"
+version = "0.12.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f"
+checksum = "4873307b7c257eddcb50c9bedf158eb669578359fb28428bef438fec8e6ba7c2"
[[package]]
name = "unicode-ident"
@@ -454,9 +454,9 @@ dependencies = [
[[package]]
name = "windows-targets"
-version = "0.52.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
@@ -470,66 +470,66 @@ dependencies = [
[[package]]
name = "windows_aarch64_gnullvm"
-version = "0.52.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
-version = "0.52.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
-version = "0.52.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
-version = "0.52.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
-version = "0.52.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
-version = "0.52.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
-version = "0.52.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
-version = "0.52.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "zerocopy"
-version = "0.7.32"
+version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
+checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
-version = "0.7.32"
+version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
+checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain
index cfa91744a0e8..db9b551bd2a2 100644
--- a/compiler/rustc_codegen_cranelift/rust-toolchain
+++ b/compiler/rustc_codegen_cranelift/rust-toolchain
@@ -1,3 +1,3 @@
[toolchain]
-channel = "nightly-2024-06-30"
+channel = "nightly-2024-07-13"
components = ["rust-src", "rustc-dev", "llvm-tools"]
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
index c1b7e4b0e076..f0550c23b177 100755
--- a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
@@ -34,6 +34,7 @@ rm tests/ui/parser/unclosed-delimiter-in-dep.rs # submodule contains //~ERROR
# vendor intrinsics
rm tests/ui/asm/x86_64/evex512-implicit-feature.rs # unimplemented AVX512 x86 vendor intrinsic
+rm tests/ui/simd/dont-invalid-bitcast-x86_64.rs # unimplemented llvm.x86.sse41.round.ps
# exotic linkages
rm tests/incremental/hashes/function_interfaces.rs
@@ -56,13 +57,13 @@ rm -r tests/run-make/target-specs # i686 not supported by Cranelift
rm -r tests/run-make/mismatching-target-triples # same
rm tests/ui/asm/x86_64/issue-96797.rs # const and sym inline asm operands don't work entirely correctly
rm tests/ui/asm/x86_64/goto.rs # inline asm labels not supported
+rm tests/ui/simd/simd-bitmask-notpow2.rs # non-pow-of-2 simd vector sizes
# requires LTO
rm -r tests/run-make/cdylib
rm -r tests/run-make/codegen-options-parsing
rm -r tests/run-make/lto-*
rm -r tests/run-make/reproducible-build-2
-rm -r tests/run-make/issue-109934-lto-debuginfo
rm -r tests/run-make/no-builtins-lto
rm -r tests/run-make/reachable-extern-fn-available-lto
@@ -109,6 +110,7 @@ rm -r tests/run-make/symbols-include-type-name
rm -r tests/run-make/notify-all-emit-artifacts
rm -r tests/run-make/reset-codegen-1
rm -r tests/run-make/inline-always-many-cgu
+rm -r tests/run-make/intrinsic-unreachable
# giving different but possibly correct results
# =============================================
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index 9dc94ab33ea9..fa0de6f9de5e 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -395,7 +395,6 @@ pub(crate) fn codegen_terminator_call<'tcx>(
crate::intrinsics::codegen_llvm_intrinsic_call(
fx,
&fx.tcx.symbol_name(instance).name,
- fn_args,
args,
ret_place,
target,
diff --git a/compiler/rustc_codegen_cranelift/src/archive.rs b/compiler/rustc_codegen_cranelift/src/archive.rs
index 414d3db1c51a..3f23e0d9e046 100644
--- a/compiler/rustc_codegen_cranelift/src/archive.rs
+++ b/compiler/rustc_codegen_cranelift/src/archive.rs
@@ -14,12 +14,12 @@ impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder {
fn create_dll_import_lib(
&self,
- _sess: &Session,
+ sess: &Session,
_lib_name: &str,
_dll_imports: &[rustc_session::cstore::DllImport],
_tmpdir: &Path,
_is_direct_dependency: bool,
) -> PathBuf {
- unimplemented!("creating dll imports is not yet supported");
+ sess.dcx().fatal("raw-dylib is not yet supported by rustc_codegen_cranelift");
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index fd34ed88c0b2..0ba163f50aec 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -385,15 +385,43 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
if let Some(section_name) = section_name {
let (segment_name, section_name) = if tcx.sess.target.is_like_osx {
- let section_name = section_name.as_str();
- if let Some(names) = section_name.split_once(',') {
- names
- } else {
+ // See https://github.com/llvm/llvm-project/blob/main/llvm/lib/MC/MCSectionMachO.cpp
+ let mut parts = section_name.as_str().split(',');
+ let Some(segment_name) = parts.next() else {
tcx.dcx().fatal(format!(
"#[link_section = \"{}\"] is not valid for macos target: must be segment and section separated by comma",
section_name
));
+ };
+ let Some(section_name) = parts.next() else {
+ tcx.dcx().fatal(format!(
+ "#[link_section = \"{}\"] is not valid for macos target: must be segment and section separated by comma",
+ section_name
+ ));
+ };
+ if section_name.len() > 16 {
+ tcx.dcx().fatal(format!(
+ "#[link_section = \"{}\"] is not valid for macos target: section name bigger than 16 bytes",
+ section_name
+ ));
}
+ let section_type = parts.next().unwrap_or("regular");
+ if section_type != "regular" && section_type != "cstring_literals" {
+ tcx.dcx().fatal(format!(
+ "#[link_section = \"{}\"] is not supported: unsupported section type {}",
+ section_name, section_type,
+ ));
+ }
+ let _attrs = parts.next();
+ if parts.next().is_some() {
+ tcx.dcx().fatal(format!(
+ "#[link_section = \"{}\"] is not valid for macos target: too many components",
+ section_name
+ ));
+ }
+ // FIXME(bytecodealliance/wasmtime#8901) set S_CSTRING_LITERALS section type when
+ // cstring_literals is specified
+ (segment_name, section_name)
} else {
("", section_name.as_str())
};
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs
index 65f4c67b21f1..1c6e471cc870 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs
@@ -39,7 +39,13 @@ impl WriteDebugInfo for ObjectProduct {
let section_id = self.object.add_section(
segment,
name,
- if id == SectionId::EhFrame { SectionKind::ReadOnlyData } else { SectionKind::Debug },
+ if id == SectionId::DebugStr || id == SectionId::DebugLineStr {
+ SectionKind::DebugString
+ } else if id == SectionId::EhFrame {
+ SectionKind::ReadOnlyData
+ } else {
+ SectionKind::Debug
+ },
);
self.object
.section_mut(section_id)
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
index e50c74b87f60..720a0d8fbf59 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -6,32 +6,16 @@ use crate::prelude::*;
pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- generic_args: GenericArgsRef<'tcx>,
args: &[Spanned>],
ret: CPlace<'tcx>,
target: Option,
span: Span,
) {
if intrinsic.starts_with("llvm.aarch64") {
- return llvm_aarch64::codegen_aarch64_llvm_intrinsic_call(
- fx,
- intrinsic,
- generic_args,
- args,
- ret,
- target,
- );
+ return llvm_aarch64::codegen_aarch64_llvm_intrinsic_call(fx, intrinsic, args, ret, target);
}
if intrinsic.starts_with("llvm.x86") {
- return llvm_x86::codegen_x86_llvm_intrinsic_call(
- fx,
- intrinsic,
- generic_args,
- args,
- ret,
- target,
- span,
- );
+ return llvm_x86::codegen_x86_llvm_intrinsic_call(fx, intrinsic, args, ret, target, span);
}
match intrinsic {
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
index e66bcbf4e40e..f0fb18608e07 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
@@ -6,7 +6,6 @@ use crate::prelude::*;
pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- _args: GenericArgsRef<'tcx>,
args: &[Spanned>],
ret: CPlace<'tcx>,
target: Option,
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index 399518e58d8c..e1896138e487 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -10,7 +10,6 @@ use crate::prelude::*;
pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- _args: GenericArgsRef<'tcx>,
args: &[Spanned>],
ret: CPlace<'tcx>,
target: Option,
diff --git a/compiler/rustc_codegen_gcc/example/mini_core.rs b/compiler/rustc_codegen_gcc/example/mini_core.rs
index a48c0a4450c2..f47bfdad1312 100644
--- a/compiler/rustc_codegen_gcc/example/mini_core.rs
+++ b/compiler/rustc_codegen_gcc/example/mini_core.rs
@@ -1,5 +1,5 @@
#![feature(
- no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
+ no_core, lang_items, intrinsics, unboxed_closures, extern_types,
decl_macro, rustc_attrs, transparent_unions, auto_traits, freeze_impls,
thread_local
)]
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index cd82894af18e..e76694700263 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -6,7 +6,6 @@ use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, PatchableFuncti
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::config::{FunctionReturn, OptLevel};
use rustc_span::symbol::sym;
-use rustc_target::spec::abi::Abi;
use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
use smallvec::SmallVec;
@@ -482,7 +481,7 @@ pub fn from_fn_attrs<'ll, 'tcx>(
return;
}
- let mut function_features = function_features
+ let function_features = function_features
.iter()
.flat_map(|feat| {
llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{f}"))
@@ -504,17 +503,6 @@ pub fn from_fn_attrs<'ll, 'tcx>(
let name = name.as_str();
to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
}
-
- // The `"wasm"` abi on wasm targets automatically enables the
- // `+multivalue` feature because the purpose of the wasm abi is to match
- // the WebAssembly specification, which has this feature. This won't be
- // needed when LLVM enables this `multivalue` feature by default.
- if !cx.tcx.is_closure_like(instance.def_id()) {
- let abi = cx.tcx.fn_sig(instance.def_id()).skip_binder().abi();
- if abi == Abi::Wasm {
- function_features.push("+multivalue".to_string());
- }
- }
}
let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index b969fe27a99b..14a944685870 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -66,8 +66,15 @@ impl<'tcx> FunctionCoverageCollector<'tcx> {
// For each expression ID that is directly used by one or more mappings,
// mark it as not-yet-seen. This indicates that we expect to see a
// corresponding `ExpressionUsed` statement during MIR traversal.
- for term in function_coverage_info.mappings.iter().flat_map(|m| m.kind.terms()) {
- if let CovTerm::Expression(id) = term {
+ for mapping in function_coverage_info.mappings.iter() {
+ // Currently we only worry about ordinary code mappings.
+ // For branch and MC/DC mappings, expressions might not correspond
+ // to any particular point in the control-flow graph.
+ // (Keep this in sync with the injection of `ExpressionUsed`
+ // statements in the `InstrumentCoverage` MIR pass.)
+ if let MappingKind::Code(term) = mapping.kind
+ && let CovTerm::Expression(id) = term
+ {
expressions_seen.remove(id);
}
}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 08e9e312827c..e0bf6110cdf0 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -2057,7 +2057,7 @@ extern "C" {
AddrOpsCount: c_uint,
DL: &'a DILocation,
InsertAtEnd: &'a BasicBlock,
- ) -> &'a Value;
+ );
pub fn LLVMRustDIBuilderCreateEnumerator<'a>(
Builder: &DIBuilder<'a>,
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index 2bd5dfdce83e..dd134ebbe6b1 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -398,7 +398,7 @@ impl<'a> GccLinker<'a> {
self.link_arg("-dylib");
// Note that the `osx_rpath_install_name` option here is a hack
- // purely to support rustbuild right now, we should get a more
+ // purely to support bootstrap right now, we should get a more
// principled solution at some point to force the compiler to pass
// the right `-Wl,-install_name` with an `@rpath` in it.
if self.sess.opts.cg.rpath || self.sess.opts.unstable_opts.osx_rpath_install_name {
diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs
index bcddfe9fb9cb..cea164df6173 100644
--- a/compiler/rustc_codegen_ssa/src/target_features.rs
+++ b/compiler/rustc_codegen_ssa/src/target_features.rs
@@ -80,6 +80,8 @@ pub fn from_target_feature(
Some(sym::loongarch_target_feature) => rust_features.loongarch_target_feature,
Some(sym::lahfsahf_target_feature) => rust_features.lahfsahf_target_feature,
Some(sym::prfchw_target_feature) => rust_features.prfchw_target_feature,
+ Some(sym::x86_amx_intrinsics) => rust_features.x86_amx_intrinsics,
+ Some(sym::xop_target_feature) => rust_features.xop_target_feature,
Some(name) => bug!("unknown target feature gate {}", name),
None => true,
};
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index d86f1a7a34f2..b227565f8f91 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -20,7 +20,7 @@ use super::{
err_inval, err_ub_custom, err_unsup_format, memory::MemoryKind, throw_inval, throw_ub_custom,
throw_ub_format, util::ensure_monomorphic_enough, Allocation, CheckInAllocMsg, ConstAllocation,
GlobalId, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, Pointer, PointerArithmetic,
- Scalar,
+ Provenance, Scalar,
};
use crate::fluent_generated as fluent;
@@ -259,25 +259,28 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// This will always return 0.
(a, b)
}
- (Err(_), _) | (_, Err(_)) => {
- // We managed to find a valid allocation for one pointer, but not the other.
- // That means they are definitely not pointing to the same allocation.
+ _ if M::Provenance::OFFSET_IS_ADDR && a.addr() == b.addr() => {
+ // At least one of the pointers has provenance, but they also point to
+ // the same address so it doesn't matter; this is fine. `(0, 0)` means
+ // we pass all the checks below and return 0.
+ (0, 0)
+ }
+ // From here onwards, the pointers are definitely for different addresses
+ // (or we can't determine their absolute address).
+ (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _)))
+ if a_alloc_id == b_alloc_id =>
+ {
+ // Found allocation for both, and it's the same.
+ // Use these offsets for distance calculation.
+ (a_offset.bytes(), b_offset.bytes())
+ }
+ _ => {
+ // Not into the same allocation -- this is UB.
throw_ub_custom!(
fluent::const_eval_offset_from_different_allocations,
name = intrinsic_name,
);
}
- (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _))) => {
- // Found allocation for both. They must be into the same allocation.
- if a_alloc_id != b_alloc_id {
- throw_ub_custom!(
- fluent::const_eval_offset_from_different_allocations,
- name = intrinsic_name,
- );
- }
- // Use these offsets for distance calculation.
- (a_offset.bytes(), b_offset.bytes())
- }
};
// Compute distance.
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index baaee67e7871..33c25b746ccc 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -995,13 +995,25 @@ where
}
/// Returns a wide MPlace of type `str` to a new 1-aligned allocation.
+ /// Immutable strings are deduplicated and stored in global memory.
pub fn allocate_str(
&mut self,
str: &str,
kind: MemoryKind,
mutbl: Mutability,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
- let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?;
+ let tcx = self.tcx.tcx;
+
+ // Use cache for immutable strings.
+ let ptr = if mutbl.is_not() {
+ // Use dedup'd allocation function.
+ let id = tcx.allocate_bytes_dedup(str.as_bytes());
+
+ // Turn untagged "global" pointers (obtained via `tcx`) into the machine pointer to the allocation.
+ M::adjust_alloc_root_pointer(&self, Pointer::from(id), Some(kind))?
+ } else {
+ self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?
+ };
let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
let layout = self.layout_of(self.tcx.types.str_).unwrap();
Ok(self.ptr_with_meta_to_mplace(ptr.into(), MemPlaceMeta::Meta(meta), layout))
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index c4b2e067bbeb..e5e733439ea0 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -15,6 +15,7 @@ jobserver_crate = { version = "0.1.28", package = "jobserver" }
measureme = "11"
rustc-hash = "1.1.0"
rustc-rayon = { version = "0.5.0", optional = true }
+rustc-stable-hash = { version = "0.1.0", features = ["nightly"] }
rustc_arena = { path = "../rustc_arena" }
rustc_graphviz = { path = "../rustc_graphviz" }
rustc_index = { path = "../rustc_index", package = "rustc_index" }
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
index 1bee159489dc..30e3d6aa86ce 100644
--- a/compiler/rustc_data_structures/src/fingerprint.rs
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -1,5 +1,5 @@
use crate::stable_hasher::impl_stable_traits_for_trivial_type;
-use crate::stable_hasher::{Hash64, StableHasher, StableHasherResult};
+use crate::stable_hasher::{FromStableHash, Hash64, StableHasherHash};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use std::hash::{Hash, Hasher};
@@ -154,10 +154,11 @@ impl FingerprintHasher for crate::unhash::Unhasher {
}
}
-impl StableHasherResult for Fingerprint {
+impl FromStableHash for Fingerprint {
+ type Hash = StableHasherHash;
+
#[inline]
- fn finish(hasher: StableHasher) -> Self {
- let (_0, _1) = hasher.finalize();
+ fn from(StableHasherHash([_0, _1]): Self::Hash) -> Self {
Fingerprint(_0, _1)
}
}
diff --git a/compiler/rustc_data_structures/src/hashes.rs b/compiler/rustc_data_structures/src/hashes.rs
index 1564eeb4baee..ef5d2e845ef0 100644
--- a/compiler/rustc_data_structures/src/hashes.rs
+++ b/compiler/rustc_data_structures/src/hashes.rs
@@ -11,7 +11,7 @@
//! connect the fact that they can only be produced by a `StableHasher` to their
//! `Encode`/`Decode` impls.
-use crate::stable_hasher::{StableHasher, StableHasherResult};
+use crate::stable_hasher::{FromStableHash, StableHasherHash};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use std::fmt;
use std::ops::BitXorAssign;
@@ -56,10 +56,12 @@ impl Decodable for Hash64 {
}
}
-impl StableHasherResult for Hash64 {
+impl FromStableHash for Hash64 {
+ type Hash = StableHasherHash;
+
#[inline]
- fn finish(hasher: StableHasher) -> Self {
- Self { inner: hasher.finalize().0 }
+ fn from(StableHasherHash([_0, __1]): Self::Hash) -> Self {
+ Self { inner: _0 }
}
}
@@ -121,10 +123,11 @@ impl Decodable for Hash128 {
}
}
-impl StableHasherResult for Hash128 {
+impl FromStableHash for Hash128 {
+ type Hash = StableHasherHash;
+
#[inline]
- fn finish(hasher: StableHasher) -> Self {
- let (_0, _1) = hasher.finalize();
+ fn from(StableHasherHash([_0, _1]): Self::Hash) -> Self {
Self { inner: u128::from(_0) | (u128::from(_1) << 64) }
}
}
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 356ddf014bee..3f18b036940b 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -24,7 +24,6 @@
#![feature(core_intrinsics)]
#![feature(extend_one)]
#![feature(hash_raw_entry)]
-#![feature(hasher_prefixfree_extras)]
#![feature(macro_metavar_expr)]
#![feature(map_try_insert)]
#![feature(min_specialization)]
@@ -67,7 +66,6 @@ pub mod owned_slice;
pub mod packed;
pub mod profiling;
pub mod sharded;
-pub mod sip128;
pub mod small_c_str;
pub mod snapshot_map;
pub mod sorted_map;
diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs
deleted file mode 100644
index 812ed410a94b..000000000000
--- a/compiler/rustc_data_structures/src/sip128.rs
+++ /dev/null
@@ -1,505 +0,0 @@
-//! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.
-
-// This code is very hot and uses lots of arithmetic, avoid overflow checks for performance.
-// See https://github.com/rust-lang/rust/pull/119440#issuecomment-1874255727
-use rustc_serialize::int_overflow::{DebugStrictAdd, DebugStrictSub};
-use std::hash::Hasher;
-use std::mem::{self, MaybeUninit};
-use std::ptr;
-
-#[cfg(test)]
-mod tests;
-
-// The SipHash algorithm operates on 8-byte chunks.
-const ELEM_SIZE: usize = mem::size_of::();
-
-// Size of the buffer in number of elements, not including the spill.
-//
-// The selection of this size was guided by rustc-perf benchmark comparisons of
-// different buffer sizes. It should be periodically reevaluated as the compiler
-// implementation and input characteristics change.
-//
-// Using the same-sized buffer for everything we hash is a performance versus
-// complexity tradeoff. The ideal buffer size, and whether buffering should even
-// be used, depends on what is being hashed. It may be worth it to size the
-// buffer appropriately (perhaps by making SipHasher128 generic over the buffer
-// size) or disable buffering depending on what is being hashed. But at this
-// time, we use the same buffer size for everything.
-const BUFFER_CAPACITY: usize = 8;
-
-// Size of the buffer in bytes, not including the spill.
-const BUFFER_SIZE: usize = BUFFER_CAPACITY * ELEM_SIZE;
-
-// Size of the buffer in number of elements, including the spill.
-const BUFFER_WITH_SPILL_CAPACITY: usize = BUFFER_CAPACITY + 1;
-
-// Size of the buffer in bytes, including the spill.
-const BUFFER_WITH_SPILL_SIZE: usize = BUFFER_WITH_SPILL_CAPACITY * ELEM_SIZE;
-
-// Index of the spill element in the buffer.
-const BUFFER_SPILL_INDEX: usize = BUFFER_WITH_SPILL_CAPACITY - 1;
-
-#[derive(Debug, Clone)]
-#[repr(C)]
-pub struct SipHasher128 {
- // The access pattern during hashing consists of accesses to `nbuf` and
- // `buf` until the buffer is full, followed by accesses to `state` and
- // `processed`, and then repetition of that pattern until hashing is done.
- // This is the basis for the ordering of fields below. However, in practice
- // the cache miss-rate for data access is extremely low regardless of order.
- nbuf: usize, // how many bytes in buf are valid
- buf: [MaybeUninit; BUFFER_WITH_SPILL_CAPACITY], // unprocessed bytes le
- state: State, // hash State
- processed: usize, // how many bytes we've processed
-}
-
-#[derive(Debug, Clone, Copy)]
-#[repr(C)]
-struct State {
- // v0, v2 and v1, v3 show up in pairs in the algorithm,
- // and simd implementations of SipHash will use vectors
- // of v02 and v13. By placing them in this order in the struct,
- // the compiler can pick up on just a few simd optimizations by itself.
- v0: u64,
- v2: u64,
- v1: u64,
- v3: u64,
-}
-
-macro_rules! compress {
- ($state:expr) => {{ compress!($state.v0, $state.v1, $state.v2, $state.v3) }};
- ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
- $v0 = $v0.wrapping_add($v1);
- $v2 = $v2.wrapping_add($v3);
- $v1 = $v1.rotate_left(13);
- $v1 ^= $v0;
- $v3 = $v3.rotate_left(16);
- $v3 ^= $v2;
- $v0 = $v0.rotate_left(32);
-
- $v2 = $v2.wrapping_add($v1);
- $v0 = $v0.wrapping_add($v3);
- $v1 = $v1.rotate_left(17);
- $v1 ^= $v2;
- $v3 = $v3.rotate_left(21);
- $v3 ^= $v0;
- $v2 = $v2.rotate_left(32);
- }};
-}
-
-// Copies up to 8 bytes from source to destination. This performs better than
-// `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real
-// workloads since all of the copies have fixed sizes and avoid calling memcpy.
-//
-// This is specifically designed for copies of up to 8 bytes, because that's the
-// maximum of number bytes needed to fill an 8-byte-sized element on which
-// SipHash operates. Note that for variable-sized copies which are known to be
-// less than 8 bytes, this function will perform more work than necessary unless
-// the compiler is able to optimize the extra work away.
-#[inline]
-unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
- debug_assert!(count <= 8);
-
- unsafe {
- if count == 8 {
- ptr::copy_nonoverlapping(src, dst, 8);
- return;
- }
-
- let mut i = 0;
- if i.debug_strict_add(3) < count {
- ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
- i = i.debug_strict_add(4);
- }
-
- if i.debug_strict_add(1) < count {
- ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
- i = i.debug_strict_add(2)
- }
-
- if i < count {
- *dst.add(i) = *src.add(i);
- i = i.debug_strict_add(1);
- }
-
- debug_assert_eq!(i, count);
- }
-}
-
-// # Implementation
-//
-// This implementation uses buffering to reduce the hashing cost for inputs
-// consisting of many small integers. Buffering simplifies the integration of
-// integer input--the integer write function typically just appends to the
-// buffer with a statically sized write, updates metadata, and returns.
-//
-// Buffering also prevents alternating between writes that do and do not trigger
-// the hashing process. Only when the entire buffer is full do we transition
-// into hashing. This allows us to keep the hash state in registers for longer,
-// instead of loading and storing it before and after processing each element.
-//
-// When a write fills the buffer, a buffer processing function is invoked to
-// hash all of the buffered input. The buffer processing functions are marked
-// `#[inline(never)]` so that they aren't inlined into the append functions,
-// which ensures the more frequently called append functions remain inlineable
-// and don't include register pushing/popping that would only be made necessary
-// by inclusion of the complex buffer processing path which uses those
-// registers.
-//
-// The buffer includes a "spill"--an extra element at the end--which simplifies
-// the integer write buffer processing path. The value that fills the buffer can
-// be written with a statically sized write that may spill over into the spill.
-// After the buffer is processed, the part of the value that spilled over can be
-// written from the spill to the beginning of the buffer with another statically
-// sized write. This write may copy more bytes than actually spilled over, but
-// we maintain the metadata such that any extra copied bytes will be ignored by
-// subsequent processing. Due to the static sizes, this scheme performs better
-// than copying the exact number of bytes needed into the end and beginning of
-// the buffer.
-//
-// The buffer is uninitialized, which improves performance, but may preclude
-// efficient implementation of alternative approaches. The improvement is not so
-// large that an alternative approach should be disregarded because it cannot be
-// efficiently implemented with an uninitialized buffer. On the other hand, an
-// uninitialized buffer may become more important should a larger one be used.
-//
-// # Platform Dependence
-//
-// The SipHash algorithm operates on byte sequences. It parses the input stream
-// as 8-byte little-endian integers. Therefore, given the same byte sequence, it
-// produces the same result on big- and little-endian hardware.
-//
-// However, the Hasher trait has methods which operate on multi-byte integers.
-// How they are converted into byte sequences can be endian-dependent (by using
-// native byte order) or independent (by consistently using either LE or BE byte
-// order). It can also be `isize` and `usize` size dependent (by using the
-// native size), or independent (by converting to a common size), supposing the
-// values can be represented in 32 bits.
-//
-// In order to make `SipHasher128` consistent with `SipHasher` in libstd, we
-// choose to do the integer to byte sequence conversion in the platform-
-// dependent way. Clients can achieve platform-independent hashing by widening
-// `isize` and `usize` integers to 64 bits on 32-bit systems and byte-swapping
-// integers on big-endian systems before passing them to the writing functions.
-// This causes the input byte sequence to look identical on big- and little-
-// endian systems (supposing `isize` and `usize` values can be represented in 32
-// bits), which ensures platform-independent results.
-impl SipHasher128 {
- #[inline]
- pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
- let mut hasher = SipHasher128 {
- nbuf: 0,
- buf: [MaybeUninit::uninit(); BUFFER_WITH_SPILL_CAPACITY],
- state: State {
- v0: key0 ^ 0x736f6d6570736575,
- // The XOR with 0xee is only done on 128-bit algorithm version.
- v1: key1 ^ (0x646f72616e646f6d ^ 0xee),
- v2: key0 ^ 0x6c7967656e657261,
- v3: key1 ^ 0x7465646279746573,
- },
- processed: 0,
- };
-
- unsafe {
- // Initialize spill because we read from it in `short_write_process_buffer`.
- *hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed();
- }
-
- hasher
- }
-
- #[inline]
- pub fn short_write(&mut self, bytes: [u8; LEN]) {
- let nbuf = self.nbuf;
- debug_assert!(LEN <= 8);
- debug_assert!(nbuf < BUFFER_SIZE);
- debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
-
- if nbuf.debug_strict_add(LEN) < BUFFER_SIZE {
- unsafe {
- // The memcpy call is optimized away because the size is known.
- let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
- ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
- }
-
- self.nbuf = nbuf.debug_strict_add(LEN);
-
- return;
- }
-
- unsafe { self.short_write_process_buffer(bytes) }
- }
-
- // A specialized write function for values with size <= 8 that should only
- // be called when the write would cause the buffer to fill.
- //
- // SAFETY: the write of `x` into `self.buf` starting at byte offset
- // `self.nbuf` must cause `self.buf` to become fully initialized (and not
- // overflow) if it wasn't already.
- #[inline(never)]
- unsafe fn short_write_process_buffer(&mut self, bytes: [u8; LEN]) {
- unsafe {
- let nbuf = self.nbuf;
- debug_assert!(LEN <= 8);
- debug_assert!(nbuf < BUFFER_SIZE);
- debug_assert!(nbuf + LEN >= BUFFER_SIZE);
- debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
-
- // Copy first part of input into end of buffer, possibly into spill
- // element. The memcpy call is optimized away because the size is known.
- let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
- ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
-
- // Process buffer.
- for i in 0..BUFFER_CAPACITY {
- let elem = self.buf.get_unchecked(i).assume_init().to_le();
- self.state.v3 ^= elem;
- Sip13Rounds::c_rounds(&mut self.state);
- self.state.v0 ^= elem;
- }
-
- // Copy remaining input into start of buffer by copying LEN - 1
- // elements from spill (at most LEN - 1 bytes could have overflowed
- // into the spill). The memcpy call is optimized away because the size
- // is known. And the whole copy is optimized away for LEN == 1.
- let dst = self.buf.as_mut_ptr() as *mut u8;
- let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
- ptr::copy_nonoverlapping(src, dst, LEN - 1);
-
- // This function should only be called when the write fills the buffer.
- // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
- // LEN is statically known, so the branch is optimized away.
- self.nbuf =
- if LEN == 1 { 0 } else { nbuf.debug_strict_add(LEN).debug_strict_sub(BUFFER_SIZE) };
- self.processed = self.processed.debug_strict_add(BUFFER_SIZE);
- }
- }
-
- // A write function for byte slices.
- #[inline]
- fn slice_write(&mut self, msg: &[u8]) {
- let length = msg.len();
- let nbuf = self.nbuf;
- debug_assert!(nbuf < BUFFER_SIZE);
-
- if nbuf.debug_strict_add(length) < BUFFER_SIZE {
- unsafe {
- let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-
- if length <= 8 {
- copy_nonoverlapping_small(msg.as_ptr(), dst, length);
- } else {
- // This memcpy is *not* optimized away.
- ptr::copy_nonoverlapping(msg.as_ptr(), dst, length);
- }
- }
-
- self.nbuf = nbuf.debug_strict_add(length);
-
- return;
- }
-
- unsafe { self.slice_write_process_buffer(msg) }
- }
-
- // A write function for byte slices that should only be called when the
- // write would cause the buffer to fill.
- //
- // SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`,
- // and `msg` must contain enough bytes to initialize the rest of the element
- // containing the byte offset `self.nbuf`.
- #[inline(never)]
- unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
- unsafe {
- let length = msg.len();
- let nbuf = self.nbuf;
- debug_assert!(nbuf < BUFFER_SIZE);
- debug_assert!(nbuf + length >= BUFFER_SIZE);
-
- // Always copy first part of input into current element of buffer.
- // This function should only be called when the write fills the buffer,
- // so we know that there is enough input to fill the current element.
- let valid_in_elem = nbuf % ELEM_SIZE;
- let needed_in_elem = ELEM_SIZE.debug_strict_sub(valid_in_elem);
-
- let src = msg.as_ptr();
- let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
- copy_nonoverlapping_small(src, dst, needed_in_elem);
-
- // Process buffer.
-
- // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
- // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
- // We know that is true, because last step ensured we have a full
- // element in the buffer.
- let last = (nbuf / ELEM_SIZE).debug_strict_add(1);
-
- for i in 0..last {
- let elem = self.buf.get_unchecked(i).assume_init().to_le();
- self.state.v3 ^= elem;
- Sip13Rounds::c_rounds(&mut self.state);
- self.state.v0 ^= elem;
- }
-
- // Process the remaining element-sized chunks of input.
- let mut processed = needed_in_elem;
- let input_left = length.debug_strict_sub(processed);
- let elems_left = input_left / ELEM_SIZE;
- let extra_bytes_left = input_left % ELEM_SIZE;
-
- for _ in 0..elems_left {
- let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
- self.state.v3 ^= elem;
- Sip13Rounds::c_rounds(&mut self.state);
- self.state.v0 ^= elem;
- processed = processed.debug_strict_add(ELEM_SIZE);
- }
-
- // Copy remaining input into start of buffer.
- let src = msg.as_ptr().add(processed);
- let dst = self.buf.as_mut_ptr() as *mut u8;
- copy_nonoverlapping_small(src, dst, extra_bytes_left);
-
- self.nbuf = extra_bytes_left;
- self.processed = self.processed.debug_strict_add(nbuf.debug_strict_add(processed));
- }
- }
-
- #[inline]
- pub fn finish128(mut self) -> (u64, u64) {
- debug_assert!(self.nbuf < BUFFER_SIZE);
-
- // Process full elements in buffer.
- let last = self.nbuf / ELEM_SIZE;
-
- // Since we're consuming self, avoid updating members for a potential
- // performance gain.
- let mut state = self.state;
-
- for i in 0..last {
- let elem = unsafe { self.buf.get_unchecked(i).assume_init().to_le() };
- state.v3 ^= elem;
- Sip13Rounds::c_rounds(&mut state);
- state.v0 ^= elem;
- }
-
- // Get remaining partial element.
- let elem = if self.nbuf % ELEM_SIZE != 0 {
- unsafe {
- // Ensure element is initialized by writing zero bytes. At most
- // `ELEM_SIZE - 1` are required given the above check. It's safe
- // to write this many because we have the spill and we maintain
- // `self.nbuf` such that this write will start before the spill.
- let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf);
- ptr::write_bytes(dst, 0, ELEM_SIZE - 1);
- self.buf.get_unchecked(last).assume_init().to_le()
- }
- } else {
- 0
- };
-
- // Finalize the hash.
- let length = self.processed.debug_strict_add(self.nbuf);
- let b: u64 = ((length as u64 & 0xff) << 56) | elem;
-
- state.v3 ^= b;
- Sip13Rounds::c_rounds(&mut state);
- state.v0 ^= b;
-
- state.v2 ^= 0xee;
- Sip13Rounds::d_rounds(&mut state);
- let _0 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
-
- state.v1 ^= 0xdd;
- Sip13Rounds::d_rounds(&mut state);
- let _1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
-
- (_0, _1)
- }
-}
-
-impl Hasher for SipHasher128 {
- #[inline]
- fn write_u8(&mut self, i: u8) {
- self.short_write(i.to_ne_bytes());
- }
-
- #[inline]
- fn write_u16(&mut self, i: u16) {
- self.short_write(i.to_ne_bytes());
- }
-
- #[inline]
- fn write_u32(&mut self, i: u32) {
- self.short_write(i.to_ne_bytes());
- }
-
- #[inline]
- fn write_u64(&mut self, i: u64) {
- self.short_write(i.to_ne_bytes());
- }
-
- #[inline]
- fn write_usize(&mut self, i: usize) {
- self.short_write(i.to_ne_bytes());
- }
-
- #[inline]
- fn write_i8(&mut self, i: i8) {
- self.short_write((i as u8).to_ne_bytes());
- }
-
- #[inline]
- fn write_i16(&mut self, i: i16) {
- self.short_write((i as u16).to_ne_bytes());
- }
-
- #[inline]
- fn write_i32(&mut self, i: i32) {
- self.short_write((i as u32).to_ne_bytes());
- }
-
- #[inline]
- fn write_i64(&mut self, i: i64) {
- self.short_write((i as u64).to_ne_bytes());
- }
-
- #[inline]
- fn write_isize(&mut self, i: isize) {
- self.short_write((i as usize).to_ne_bytes());
- }
-
- #[inline]
- fn write(&mut self, msg: &[u8]) {
- self.slice_write(msg);
- }
-
- #[inline]
- fn write_str(&mut self, s: &str) {
- // This hasher works byte-wise, and `0xFF` cannot show up in a `str`,
- // so just hashing the one extra byte is enough to be prefix-free.
- self.write(s.as_bytes());
- self.write_u8(0xFF);
- }
-
- fn finish(&self) -> u64 {
- panic!("SipHasher128 cannot provide valid 64 bit hashes")
- }
-}
-
-#[derive(Debug, Clone, Default)]
-struct Sip13Rounds;
-
-impl Sip13Rounds {
- #[inline]
- fn c_rounds(state: &mut State) {
- compress!(state);
- }
-
- #[inline]
- fn d_rounds(state: &mut State) {
- compress!(state);
- compress!(state);
- compress!(state);
- }
-}
diff --git a/compiler/rustc_data_structures/src/sip128/tests.rs b/compiler/rustc_data_structures/src/sip128/tests.rs
deleted file mode 100644
index e9dd0f1176b9..000000000000
--- a/compiler/rustc_data_structures/src/sip128/tests.rs
+++ /dev/null
@@ -1,304 +0,0 @@
-use super::*;
-
-use std::hash::Hash;
-
-// Hash just the bytes of the slice, without length prefix
-struct Bytes<'a>(&'a [u8]);
-
-impl<'a> Hash for Bytes<'a> {
- #[allow(unused_must_use)]
- fn hash(&self, state: &mut H) {
- for byte in self.0 {
- state.write_u8(*byte);
- }
- }
-}
-
-fn hash_with(mut st: SipHasher128, x: &T) -> (u64, u64) {
- x.hash(&mut st);
- st.finish128()
-}
-
-fn hash(x: &T) -> (u64, u64) {
- hash_with(SipHasher128::new_with_keys(0, 0), x)
-}
-#[rustfmt::skip]
-const TEST_VECTOR: [[u8; 16]; 64] = [
- [0xe7, 0x7e, 0xbc, 0xb2, 0x27, 0x88, 0xa5, 0xbe, 0xfd, 0x62, 0xdb, 0x6a, 0xdd, 0x30, 0x30, 0x01],
- [0xfc, 0x6f, 0x37, 0x04, 0x60, 0xd3, 0xed, 0xa8, 0x5e, 0x05, 0x73, 0xcc, 0x2b, 0x2f, 0xf0, 0x63],
- [0x75, 0x78, 0x7f, 0x09, 0x05, 0x69, 0x83, 0x9b, 0x85, 0x5b, 0xc9, 0x54, 0x8c, 0x6a, 0xea, 0x95],
- [0x6b, 0xc5, 0xcc, 0xfa, 0x1e, 0xdc, 0xf7, 0x9f, 0x48, 0x23, 0x18, 0x77, 0x12, 0xeb, 0xd7, 0x43],
- [0x0c, 0x78, 0x4e, 0x71, 0xac, 0x2b, 0x28, 0x5a, 0x9f, 0x8e, 0x92, 0xe7, 0x8f, 0xbf, 0x2c, 0x25],
- [0xf3, 0x28, 0xdb, 0x89, 0x34, 0x5b, 0x62, 0x0c, 0x79, 0x52, 0x29, 0xa4, 0x26, 0x95, 0x84, 0x3e],
- [0xdc, 0xd0, 0x3d, 0x29, 0xf7, 0x43, 0xe7, 0x10, 0x09, 0x51, 0xb0, 0xe8, 0x39, 0x85, 0xa6, 0xf8],
- [0x10, 0x84, 0xb9, 0x23, 0xf2, 0xaa, 0xe0, 0xc3, 0xa6, 0x2f, 0x2e, 0xc8, 0x08, 0x48, 0xab, 0x77],
- [0xaa, 0x12, 0xfe, 0xe1, 0xd5, 0xe3, 0xda, 0xb4, 0x72, 0x4f, 0x16, 0xab, 0x35, 0xf9, 0xc7, 0x99],
- [0x81, 0xdd, 0xb8, 0x04, 0x2c, 0xf3, 0x39, 0x94, 0xf4, 0x72, 0x0e, 0x00, 0x94, 0x13, 0x7c, 0x42],
- [0x4f, 0xaa, 0x54, 0x1d, 0x5d, 0x49, 0x8e, 0x89, 0xba, 0x0e, 0xa4, 0xc3, 0x87, 0xb2, 0x2f, 0xb4],
- [0x72, 0x3b, 0x9a, 0xf3, 0x55, 0x44, 0x91, 0xdb, 0xb1, 0xd6, 0x63, 0x3d, 0xfc, 0x6e, 0x0c, 0x4e],
- [0xe5, 0x3f, 0x92, 0x85, 0x9e, 0x48, 0x19, 0xa8, 0xdc, 0x06, 0x95, 0x73, 0x9f, 0xea, 0x8c, 0x65],
- [0xb2, 0xf8, 0x58, 0xc7, 0xc9, 0xea, 0x80, 0x1d, 0x53, 0xd6, 0x03, 0x59, 0x6d, 0x65, 0x78, 0x44],
- [0x87, 0xe7, 0x62, 0x68, 0xdb, 0xc9, 0x22, 0x72, 0x26, 0xb0, 0xca, 0x66, 0x5f, 0x64, 0xe3, 0x78],
- [0xc1, 0x7e, 0x55, 0x05, 0xb2, 0xbd, 0x52, 0x6c, 0x29, 0x21, 0xcd, 0xec, 0x1e, 0x7e, 0x01, 0x09],
- [0xd0, 0xa8, 0xd9, 0x57, 0x15, 0x51, 0x8e, 0xeb, 0xb5, 0x13, 0xb0, 0xf8, 0x3d, 0x9e, 0x17, 0x93],
- [0x23, 0x41, 0x26, 0xf9, 0x3f, 0xbb, 0x66, 0x8d, 0x97, 0x51, 0x12, 0xe8, 0xfe, 0xbd, 0xf7, 0xec],
- [0xef, 0x42, 0xf0, 0x3d, 0xb7, 0x8f, 0x70, 0x4d, 0x02, 0x3c, 0x44, 0x9f, 0x16, 0xb7, 0x09, 0x2b],
- [0xab, 0xf7, 0x62, 0x38, 0xc2, 0x0a, 0xf1, 0x61, 0xb2, 0x31, 0x4b, 0x4d, 0x55, 0x26, 0xbc, 0xe9],
- [0x3c, 0x2c, 0x2f, 0x11, 0xbb, 0x90, 0xcf, 0x0b, 0xe3, 0x35, 0xca, 0x9b, 0x2e, 0x91, 0xe9, 0xb7],
- [0x2a, 0x7a, 0x68, 0x0f, 0x22, 0xa0, 0x2a, 0x92, 0xf4, 0x51, 0x49, 0xd2, 0x0f, 0xec, 0xe0, 0xef],
- [0xc9, 0xa8, 0xd1, 0x30, 0x23, 0x1d, 0xd4, 0x3e, 0x42, 0xe6, 0x45, 0x69, 0x57, 0xf8, 0x37, 0x79],
- [0x1d, 0x12, 0x7b, 0x84, 0x40, 0x5c, 0xea, 0xb9, 0x9f, 0xd8, 0x77, 0x5a, 0x9b, 0xe6, 0xc5, 0x59],
- [0x9e, 0x4b, 0xf8, 0x37, 0xbc, 0xfd, 0x92, 0xca, 0xce, 0x09, 0xd2, 0x06, 0x1a, 0x84, 0xd0, 0x4a],
- [0x39, 0x03, 0x1a, 0x96, 0x5d, 0x73, 0xb4, 0xaf, 0x5a, 0x27, 0x4d, 0x18, 0xf9, 0x73, 0xb1, 0xd2],
- [0x7f, 0x4d, 0x0a, 0x12, 0x09, 0xd6, 0x7e, 0x4e, 0xd0, 0x6f, 0x75, 0x38, 0xe1, 0xcf, 0xad, 0x64],
- [0xe6, 0x1e, 0xe2, 0x40, 0xfb, 0xdc, 0xce, 0x38, 0x96, 0x9f, 0x4c, 0xd2, 0x49, 0x27, 0xdd, 0x93],
- [0x4c, 0x3b, 0xa2, 0xb3, 0x7b, 0x0f, 0xdd, 0x8c, 0xfa, 0x5e, 0x95, 0xc1, 0x89, 0xb2, 0x94, 0x14],
- [0xe0, 0x6f, 0xd4, 0xca, 0x06, 0x6f, 0xec, 0xdd, 0x54, 0x06, 0x8a, 0x5a, 0xd8, 0x89, 0x6f, 0x86],
- [0x5c, 0xa8, 0x4c, 0x34, 0x13, 0x9c, 0x65, 0x80, 0xa8, 0x8a, 0xf2, 0x49, 0x90, 0x72, 0x07, 0x06],
- [0x42, 0xea, 0x96, 0x1c, 0x5b, 0x3c, 0x85, 0x8b, 0x17, 0xc3, 0xe5, 0x50, 0xdf, 0xa7, 0x90, 0x10],
- [0x40, 0x6c, 0x44, 0xde, 0xe6, 0x78, 0x57, 0xb2, 0x94, 0x31, 0x60, 0xf3, 0x0c, 0x74, 0x17, 0xd3],
- [0xc5, 0xf5, 0x7b, 0xae, 0x13, 0x20, 0xfc, 0xf4, 0xb4, 0xe8, 0x68, 0xe7, 0x1d, 0x56, 0xc6, 0x6b],
- [0x04, 0xbf, 0x73, 0x7a, 0x5b, 0x67, 0x6b, 0xe7, 0xc3, 0xde, 0x05, 0x01, 0x7d, 0xf4, 0xbf, 0xf9],
- [0x51, 0x63, 0xc9, 0xc0, 0x3f, 0x19, 0x07, 0xea, 0x10, 0x44, 0xed, 0x5c, 0x30, 0x72, 0x7b, 0x4f],
- [0x37, 0xa1, 0x10, 0xf0, 0x02, 0x71, 0x8e, 0xda, 0xd2, 0x4b, 0x3f, 0x9e, 0xe4, 0x53, 0xf1, 0x40],
- [0xb9, 0x87, 0x7e, 0x38, 0x1a, 0xed, 0xd3, 0xda, 0x08, 0xc3, 0x3e, 0x75, 0xff, 0x23, 0xac, 0x10],
- [0x7c, 0x50, 0x04, 0x00, 0x5e, 0xc5, 0xda, 0x4c, 0x5a, 0xc9, 0x44, 0x0e, 0x5c, 0x72, 0x31, 0x93],
- [0x81, 0xb8, 0x24, 0x37, 0x83, 0xdb, 0xc6, 0x46, 0xca, 0x9d, 0x0c, 0xd8, 0x2a, 0xbd, 0xb4, 0x6c],
- [0x50, 0x57, 0x20, 0x54, 0x3e, 0xb9, 0xb4, 0x13, 0xd5, 0x0b, 0x3c, 0xfa, 0xd9, 0xee, 0xf9, 0x38],
- [0x94, 0x5f, 0x59, 0x4d, 0xe7, 0x24, 0x11, 0xe4, 0xd3, 0x35, 0xbe, 0x87, 0x44, 0x56, 0xd8, 0xf3],
- [0x37, 0x92, 0x3b, 0x3e, 0x37, 0x17, 0x77, 0xb2, 0x11, 0x70, 0xbf, 0x9d, 0x7e, 0x62, 0xf6, 0x02],
- [0x3a, 0xd4, 0xe7, 0xc8, 0x57, 0x64, 0x96, 0x46, 0x11, 0xeb, 0x0a, 0x6c, 0x4d, 0x62, 0xde, 0x56],
- [0xcd, 0x91, 0x39, 0x6c, 0x44, 0xaf, 0x4f, 0x51, 0x85, 0x57, 0x8d, 0x9d, 0xd9, 0x80, 0x3f, 0x0a],
- [0xfe, 0x28, 0x15, 0x8e, 0x72, 0x7b, 0x86, 0x8f, 0x39, 0x03, 0xc9, 0xac, 0xda, 0x64, 0xa2, 0x58],
- [0x40, 0xcc, 0x10, 0xb8, 0x28, 0x8c, 0xe5, 0xf0, 0xbc, 0x3a, 0xc0, 0xb6, 0x8a, 0x0e, 0xeb, 0xc8],
- [0x6f, 0x14, 0x90, 0xf5, 0x40, 0x69, 0x9a, 0x3c, 0xd4, 0x97, 0x44, 0x20, 0xec, 0xc9, 0x27, 0x37],
- [0xd5, 0x05, 0xf1, 0xb7, 0x5e, 0x1a, 0x84, 0xa6, 0x03, 0xc4, 0x35, 0x83, 0xb2, 0xed, 0x03, 0x08],
- [0x49, 0x15, 0x73, 0xcf, 0xd7, 0x2b, 0xb4, 0x68, 0x2b, 0x7c, 0xa5, 0x88, 0x0e, 0x1c, 0x8d, 0x6f],
- [0x3e, 0xd6, 0x9c, 0xfe, 0x45, 0xab, 0x40, 0x3f, 0x2f, 0xd2, 0xad, 0x95, 0x9b, 0xa2, 0x76, 0x66],
- [0x8b, 0xe8, 0x39, 0xef, 0x1b, 0x20, 0xb5, 0x7c, 0x83, 0xba, 0x7e, 0xb6, 0xa8, 0xc2, 0x2b, 0x6a],
- [0x14, 0x09, 0x18, 0x6a, 0xb4, 0x22, 0x31, 0xfe, 0xde, 0xe1, 0x81, 0x62, 0xcf, 0x1c, 0xb4, 0xca],
- [0x2b, 0xf3, 0xcc, 0xc2, 0x4a, 0xb6, 0x72, 0xcf, 0x15, 0x1f, 0xb8, 0xd2, 0xf3, 0xf3, 0x06, 0x9b],
- [0xb9, 0xb9, 0x3a, 0x28, 0x82, 0xd6, 0x02, 0x5c, 0xdb, 0x8c, 0x56, 0xfa, 0x13, 0xf7, 0x53, 0x7b],
- [0xd9, 0x7c, 0xca, 0x36, 0x94, 0xfb, 0x20, 0x6d, 0xb8, 0xbd, 0x1f, 0x36, 0x50, 0xc3, 0x33, 0x22],
- [0x94, 0xec, 0x2e, 0x19, 0xa4, 0x0b, 0xe4, 0x1a, 0xf3, 0x94, 0x0d, 0x6b, 0x30, 0xc4, 0x93, 0x84],
- [0x4b, 0x41, 0x60, 0x3f, 0x20, 0x9a, 0x04, 0x5b, 0xe1, 0x40, 0xa3, 0x41, 0xa3, 0xdf, 0xfe, 0x10],
- [0x23, 0xfb, 0xcb, 0x30, 0x9f, 0x1c, 0xf0, 0x94, 0x89, 0x07, 0x55, 0xab, 0x1b, 0x42, 0x65, 0x69],
- [0xe7, 0xd9, 0xb6, 0x56, 0x90, 0x91, 0x8a, 0x2b, 0x23, 0x2f, 0x2f, 0x5c, 0x12, 0xc8, 0x30, 0x0e],
- [0xad, 0xe8, 0x3c, 0xf7, 0xe7, 0xf3, 0x84, 0x7b, 0x36, 0xfa, 0x4b, 0x54, 0xb0, 0x0d, 0xce, 0x61],
- [0x06, 0x10, 0xc5, 0xf2, 0xee, 0x57, 0x1c, 0x8a, 0xc8, 0x0c, 0xbf, 0xe5, 0x38, 0xbd, 0xf1, 0xc7],
- [0x27, 0x1d, 0x5d, 0x00, 0xfb, 0xdb, 0x5d, 0x15, 0x5d, 0x9d, 0xce, 0xa9, 0x7c, 0xb4, 0x02, 0x18],
- [0x4c, 0x58, 0x00, 0xe3, 0x4e, 0xfe, 0x42, 0x6f, 0x07, 0x9f, 0x6b, 0x0a, 0xa7, 0x52, 0x60, 0xad],
-];
-
-#[test]
-fn test_siphash_1_3_test_vector() {
- let k0 = 0x_07_06_05_04_03_02_01_00;
- let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
-
- let mut input: Vec = Vec::new();
-
- for i in 0..64 {
- let out = hash_with(SipHasher128::new_with_keys(k0, k1), &Bytes(&input[..]));
- let expected = (
- ((TEST_VECTOR[i][0] as u64) << 0)
- | ((TEST_VECTOR[i][1] as u64) << 8)
- | ((TEST_VECTOR[i][2] as u64) << 16)
- | ((TEST_VECTOR[i][3] as u64) << 24)
- | ((TEST_VECTOR[i][4] as u64) << 32)
- | ((TEST_VECTOR[i][5] as u64) << 40)
- | ((TEST_VECTOR[i][6] as u64) << 48)
- | ((TEST_VECTOR[i][7] as u64) << 56),
- ((TEST_VECTOR[i][8] as u64) << 0)
- | ((TEST_VECTOR[i][9] as u64) << 8)
- | ((TEST_VECTOR[i][10] as u64) << 16)
- | ((TEST_VECTOR[i][11] as u64) << 24)
- | ((TEST_VECTOR[i][12] as u64) << 32)
- | ((TEST_VECTOR[i][13] as u64) << 40)
- | ((TEST_VECTOR[i][14] as u64) << 48)
- | ((TEST_VECTOR[i][15] as u64) << 56),
- );
-
- assert_eq!(out, expected);
- input.push(i as u8);
- }
-}
-
-#[test]
-#[cfg(target_arch = "arm")]
-fn test_hash_usize() {
- let val = 0xdeadbeef_deadbeef_u64;
- assert!(hash(&(val as u64)) != hash(&(val as usize)));
- assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
-}
-#[test]
-#[cfg(target_arch = "x86_64")]
-fn test_hash_usize() {
- let val = 0xdeadbeef_deadbeef_u64;
- assert_eq!(hash(&(val as u64)), hash(&(val as usize)));
- assert!(hash(&(val as u32)) != hash(&(val as usize)));
-}
-#[test]
-#[cfg(target_arch = "x86")]
-fn test_hash_usize() {
- let val = 0xdeadbeef_deadbeef_u64;
- assert!(hash(&(val as u64)) != hash(&(val as usize)));
- assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
-}
-
-#[test]
-fn test_hash_idempotent() {
- let val64 = 0xdeadbeef_deadbeef_u64;
- assert_eq!(hash(&val64), hash(&val64));
- let val32 = 0xdeadbeef_u32;
- assert_eq!(hash(&val32), hash(&val32));
-}
-
-#[test]
-fn test_hash_no_bytes_dropped_64() {
- let val = 0xdeadbeef_deadbeef_u64;
-
- assert!(hash(&val) != hash(&zero_byte(val, 0)));
- assert!(hash(&val) != hash(&zero_byte(val, 1)));
- assert!(hash(&val) != hash(&zero_byte(val, 2)));
- assert!(hash(&val) != hash(&zero_byte(val, 3)));
- assert!(hash(&val) != hash(&zero_byte(val, 4)));
- assert!(hash(&val) != hash(&zero_byte(val, 5)));
- assert!(hash(&val) != hash(&zero_byte(val, 6)));
- assert!(hash(&val) != hash(&zero_byte(val, 7)));
-
- fn zero_byte(val: u64, byte: usize) -> u64 {
- assert!(byte < 8);
- val & !(0xff << (byte * 8))
- }
-}
-
-#[test]
-fn test_hash_no_bytes_dropped_32() {
- let val = 0xdeadbeef_u32;
-
- assert!(hash(&val) != hash(&zero_byte(val, 0)));
- assert!(hash(&val) != hash(&zero_byte(val, 1)));
- assert!(hash(&val) != hash(&zero_byte(val, 2)));
- assert!(hash(&val) != hash(&zero_byte(val, 3)));
-
- fn zero_byte(val: u32, byte: usize) -> u32 {
- assert!(byte < 4);
- val & !(0xff << (byte * 8))
- }
-}
-
-#[test]
-fn test_hash_no_concat_alias() {
- let s = ("aa", "bb");
- let t = ("aabb", "");
- let u = ("a", "abb");
-
- assert!(s != t && t != u);
- assert!(hash(&s) != hash(&t) && hash(&s) != hash(&u));
-
- let u = [1, 0, 0, 0];
- let v = (&u[..1], &u[1..3], &u[3..]);
- let w = (&u[..], &u[4..4], &u[4..4]);
-
- assert!(v != w);
- assert!(hash(&v) != hash(&w));
-}
-
-#[test]
-fn test_short_write_works() {
- let test_u8 = 0xFF_u8;
- let test_u16 = 0x1122_u16;
- let test_u32 = 0x22334455_u32;
- let test_u64 = 0x33445566_778899AA_u64;
- let test_u128 = 0x11223344_55667788_99AABBCC_DDEEFF77_u128;
- let test_usize = 0xD0C0B0A0_usize;
-
- let test_i8 = -1_i8;
- let test_i16 = -2_i16;
- let test_i32 = -3_i32;
- let test_i64 = -4_i64;
- let test_i128 = -5_i128;
- let test_isize = -6_isize;
-
- let mut h1 = SipHasher128::new_with_keys(0, 0);
- h1.write(b"bytes");
- h1.write(b"string");
- h1.write_u8(test_u8);
- h1.write_u16(test_u16);
- h1.write_u32(test_u32);
- h1.write_u64(test_u64);
- h1.write_u128(test_u128);
- h1.write_usize(test_usize);
- h1.write_i8(test_i8);
- h1.write_i16(test_i16);
- h1.write_i32(test_i32);
- h1.write_i64(test_i64);
- h1.write_i128(test_i128);
- h1.write_isize(test_isize);
-
- let mut h2 = SipHasher128::new_with_keys(0, 0);
- h2.write(b"bytes");
- h2.write(b"string");
- h2.write(&test_u8.to_ne_bytes());
- h2.write(&test_u16.to_ne_bytes());
- h2.write(&test_u32.to_ne_bytes());
- h2.write(&test_u64.to_ne_bytes());
- h2.write(&test_u128.to_ne_bytes());
- h2.write(&test_usize.to_ne_bytes());
- h2.write(&test_i8.to_ne_bytes());
- h2.write(&test_i16.to_ne_bytes());
- h2.write(&test_i32.to_ne_bytes());
- h2.write(&test_i64.to_ne_bytes());
- h2.write(&test_i128.to_ne_bytes());
- h2.write(&test_isize.to_ne_bytes());
-
- let h1_hash = h1.finish128();
- let h2_hash = h2.finish128();
-
- assert_eq!(h1_hash, h2_hash);
-}
-
-macro_rules! test_fill_buffer {
- ($type:ty, $write_method:ident) => {{
- // Test filling and overfilling the buffer from all possible offsets
- // for a given integer type and its corresponding write method.
- const SIZE: usize = std::mem::size_of::<$type>();
- let input = [42; BUFFER_SIZE];
- let x = 0x01234567_89ABCDEF_76543210_FEDCBA98_u128 as $type;
- let x_bytes = &x.to_ne_bytes();
-
- for i in 1..=SIZE {
- let s = &input[..BUFFER_SIZE - i];
-
- let mut h1 = SipHasher128::new_with_keys(7, 13);
- h1.write(s);
- h1.$write_method(x);
-
- let mut h2 = SipHasher128::new_with_keys(7, 13);
- h2.write(s);
- h2.write(x_bytes);
-
- let h1_hash = h1.finish128();
- let h2_hash = h2.finish128();
-
- assert_eq!(h1_hash, h2_hash);
- }
- }};
-}
-
-#[test]
-fn test_fill_buffer() {
- test_fill_buffer!(u8, write_u8);
- test_fill_buffer!(u16, write_u16);
- test_fill_buffer!(u32, write_u32);
- test_fill_buffer!(u64, write_u64);
- test_fill_buffer!(u128, write_u128);
- test_fill_buffer!(usize, write_usize);
-
- test_fill_buffer!(i8, write_i8);
- test_fill_buffer!(i16, write_i16);
- test_fill_buffer!(i32, write_i32);
- test_fill_buffer!(i64, write_i64);
- test_fill_buffer!(i128, write_i128);
- test_fill_buffer!(isize, write_isize);
-}
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
index a57f5067dd8d..83883eeba9ca 100644
--- a/compiler/rustc_data_structures/src/stable_hasher.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -1,8 +1,6 @@
-use crate::sip128::SipHasher128;
use rustc_index::bit_set::{self, BitSet};
use rustc_index::{Idx, IndexSlice, IndexVec};
use smallvec::SmallVec;
-use std::fmt;
use std::hash::{BuildHasher, Hash, Hasher};
use std::marker::PhantomData;
use std::mem;
@@ -13,163 +11,9 @@ mod tests;
pub use crate::hashes::{Hash128, Hash64};
-/// When hashing something that ends up affecting properties like symbol names,
-/// we want these symbol names to be calculated independently of other factors
-/// like what architecture you're compiling *from*.
-///
-/// To that end we always convert integers to little-endian format before
-/// hashing and the architecture dependent `isize` and `usize` types are
-/// extended to 64 bits if needed.
-pub struct StableHasher {
- state: SipHasher128,
-}
-
-impl fmt::Debug for StableHasher {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "{:?}", self.state)
- }
-}
-
-pub trait StableHasherResult: Sized {
- fn finish(hasher: StableHasher) -> Self;
-}
-
-impl StableHasher {
- #[inline]
- pub fn new() -> Self {
- StableHasher { state: SipHasher128::new_with_keys(0, 0) }
- }
-
- #[inline]
- pub fn finish(self) -> W {
- W::finish(self)
- }
-}
-
-impl StableHasher {
- #[inline]
- pub fn finalize(self) -> (u64, u64) {
- self.state.finish128()
- }
-}
-
-impl Hasher for StableHasher {
- fn finish(&self) -> u64 {
- panic!("use StableHasher::finalize instead");
- }
-
- #[inline]
- fn write(&mut self, bytes: &[u8]) {
- self.state.write(bytes);
- }
-
- #[inline]
- fn write_str(&mut self, s: &str) {
- self.state.write_str(s);
- }
-
- #[inline]
- fn write_length_prefix(&mut self, len: usize) {
- // Our impl for `usize` will extend it if needed.
- self.write_usize(len);
- }
-
- #[inline]
- fn write_u8(&mut self, i: u8) {
- self.state.write_u8(i);
- }
-
- #[inline]
- fn write_u16(&mut self, i: u16) {
- self.state.short_write(i.to_le_bytes());
- }
-
- #[inline]
- fn write_u32(&mut self, i: u32) {
- self.state.short_write(i.to_le_bytes());
- }
-
- #[inline]
- fn write_u64(&mut self, i: u64) {
- self.state.short_write(i.to_le_bytes());
- }
-
- #[inline]
- fn write_u128(&mut self, i: u128) {
- self.write_u64(i as u64);
- self.write_u64((i >> 64) as u64);
- }
-
- #[inline]
- fn write_usize(&mut self, i: usize) {
- // Always treat usize as u64 so we get the same results on 32 and 64 bit
- // platforms. This is important for symbol hashes when cross compiling,
- // for example.
- self.state.short_write((i as u64).to_le_bytes());
- }
-
- #[inline]
- fn write_i8(&mut self, i: i8) {
- self.state.write_i8(i);
- }
-
- #[inline]
- fn write_i16(&mut self, i: i16) {
- self.state.short_write((i as u16).to_le_bytes());
- }
-
- #[inline]
- fn write_i32(&mut self, i: i32) {
- self.state.short_write((i as u32).to_le_bytes());
- }
-
- #[inline]
- fn write_i64(&mut self, i: i64) {
- self.state.short_write((i as u64).to_le_bytes());
- }
-
- #[inline]
- fn write_i128(&mut self, i: i128) {
- self.state.write(&(i as u128).to_le_bytes());
- }
-
- #[inline]
- fn write_isize(&mut self, i: isize) {
- // Always treat isize as a 64-bit number so we get the same results on 32 and 64 bit
- // platforms. This is important for symbol hashes when cross compiling,
- // for example. Sign extending here is preferable as it means that the
- // same negative number hashes the same on both 32 and 64 bit platforms.
- let value = i as u64;
-
- // Cold path
- #[cold]
- #[inline(never)]
- fn hash_value(state: &mut SipHasher128, value: u64) {
- state.write_u8(0xFF);
- state.short_write(value.to_le_bytes());
- }
-
- // `isize` values often seem to have a small (positive) numeric value in practice.
- // To exploit this, if the value is small, we will hash a smaller amount of bytes.
- // However, we cannot just skip the leading zero bytes, as that would produce the same hash
- // e.g. if you hash two values that have the same bit pattern when they are swapped.
- // See https://github.com/rust-lang/rust/pull/93014 for context.
- //
- // Therefore, we employ the following strategy:
- // 1) When we encounter a value that fits within a single byte (the most common case), we
- // hash just that byte. This is the most common case that is being optimized. However, we do
- // not do this for the value 0xFF, as that is a reserved prefix (a bit like in UTF-8).
- // 2) When we encounter a larger value, we hash a "marker" 0xFF and then the corresponding
- // 8 bytes. Since this prefix cannot occur when we hash a single byte, when we hash two
- // `isize`s that fit within a different amount of bytes, they should always produce a different
- // byte stream for the hasher.
- if value < 0xFF {
- self.state.write_u8(value as u8);
- } else {
- hash_value(&mut self.state, value);
- }
- }
-}
+pub use rustc_stable_hash::FromStableHash;
+pub use rustc_stable_hash::SipHasher128Hash as StableHasherHash;
+pub use rustc_stable_hash::StableSipHasher128 as StableHasher;
/// Something that implements `HashStable` can be hashed in a way that is
/// stable across multiple compilation sessions.
diff --git a/compiler/rustc_data_structures/src/stable_hasher/tests.rs b/compiler/rustc_data_structures/src/stable_hasher/tests.rs
index c8921f6a7784..aab50a13af0e 100644
--- a/compiler/rustc_data_structures/src/stable_hasher/tests.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher/tests.rs
@@ -7,71 +7,6 @@ use super::*;
// ways). The expected values depend on the hashing algorithm used, so they
// need to be updated whenever StableHasher changes its hashing algorithm.
-#[test]
-fn test_hash_integers() {
- // Test that integers are handled consistently across platforms.
- let test_u8 = 0xAB_u8;
- let test_u16 = 0xFFEE_u16;
- let test_u32 = 0x445577AA_u32;
- let test_u64 = 0x01234567_13243546_u64;
- let test_u128 = 0x22114433_66557788_99AACCBB_EEDDFF77_u128;
- let test_usize = 0xD0C0B0A0_usize;
-
- let test_i8 = -100_i8;
- let test_i16 = -200_i16;
- let test_i32 = -300_i32;
- let test_i64 = -400_i64;
- let test_i128 = -500_i128;
- let test_isize = -600_isize;
-
- let mut h = StableHasher::new();
- test_u8.hash(&mut h);
- test_u16.hash(&mut h);
- test_u32.hash(&mut h);
- test_u64.hash(&mut h);
- test_u128.hash(&mut h);
- test_usize.hash(&mut h);
- test_i8.hash(&mut h);
- test_i16.hash(&mut h);
- test_i32.hash(&mut h);
- test_i64.hash(&mut h);
- test_i128.hash(&mut h);
- test_isize.hash(&mut h);
-
- // This depends on the hashing algorithm. See note at top of file.
- let expected = (13997337031081104755, 6178945012502239489);
-
- assert_eq!(h.finalize(), expected);
-}
-
-#[test]
-fn test_hash_usize() {
- // Test that usize specifically is handled consistently across platforms.
- let test_usize = 0xABCDEF01_usize;
-
- let mut h = StableHasher::new();
- test_usize.hash(&mut h);
-
- // This depends on the hashing algorithm. See note at top of file.
- let expected = (12037165114281468837, 3094087741167521712);
-
- assert_eq!(h.finalize(), expected);
-}
-
-#[test]
-fn test_hash_isize() {
- // Test that isize specifically is handled consistently across platforms.
- let test_isize = -7_isize;
-
- let mut h = StableHasher::new();
- test_isize.hash(&mut h);
-
- // This depends on the hashing algorithm. See note at top of file.
- let expected = (3979067582695659080, 2322428596355037273);
-
- assert_eq!(h.finalize(), expected);
-}
-
fn hash>(t: &T) -> Hash128 {
let mut h = StableHasher::new();
let ctx = &mut ();
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs
index bfcc2e603de4..160af8a65d9c 100644
--- a/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs
+++ b/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs
@@ -1,5 +1,6 @@
use std::ptr;
+use crate::hashes::Hash128;
use crate::stable_hasher::{HashStable, StableHasher};
use crate::tagged_ptr::{CopyTaggedPtr, Pointer, Tag, Tag2};
@@ -31,14 +32,13 @@ fn stable_hash_hashes_as_tuple() {
let hash_packed = {
let mut hasher = StableHasher::new();
tag_ptr(&12, Tag2::B11).hash_stable(&mut (), &mut hasher);
-
- hasher.finalize()
+ hasher.finish::()
};
let hash_tupled = {
let mut hasher = StableHasher::new();
(&12, Tag2::B11).hash_stable(&mut (), &mut hasher);
- hasher.finalize()
+ hasher.finish::()
};
assert_eq!(hash_packed, hash_tupled);
diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs
index 40e16b451157..9da4aa84db52 100644
--- a/compiler/rustc_expand/src/config.rs
+++ b/compiler/rustc_expand/src/config.rs
@@ -187,6 +187,7 @@ impl<'a> StripUnconfigured<'a> {
.iter()
.filter_map(|tree| match tree.clone() {
AttrTokenTree::AttrsTarget(mut target) => {
+ // Expand any `cfg_attr` attributes.
target.attrs.flat_map_in_place(|attr| self.process_cfg_attr(&attr));
if self.in_cfg(&target.attrs) {
@@ -195,6 +196,8 @@ impl<'a> StripUnconfigured<'a> {
);
Some(AttrTokenTree::AttrsTarget(target))
} else {
+ // Remove the target if there's a `cfg` attribute and
+ // the condition isn't satisfied.
None
}
}
@@ -253,9 +256,9 @@ impl<'a> StripUnconfigured<'a> {
/// Gives a compiler warning when the `cfg_attr` contains no attributes and
/// is in the original source file. Gives a compiler error if the syntax of
/// the attribute is incorrect.
- pub(crate) fn expand_cfg_attr(&self, attr: &Attribute, recursive: bool) -> Vec {
+ pub(crate) fn expand_cfg_attr(&self, cfg_attr: &Attribute, recursive: bool) -> Vec {
let Some((cfg_predicate, expanded_attrs)) =
- rustc_parse::parse_cfg_attr(attr, &self.sess.psess)
+ rustc_parse::parse_cfg_attr(cfg_attr, &self.sess.psess)
else {
return vec![];
};
@@ -264,7 +267,7 @@ impl<'a> StripUnconfigured<'a> {
if expanded_attrs.is_empty() {
self.sess.psess.buffer_lint(
rustc_lint_defs::builtin::UNUSED_ATTRIBUTES,
- attr.span,
+ cfg_attr.span,
ast::CRATE_NODE_ID,
BuiltinLintDiag::CfgAttrNoAttributes,
);
@@ -280,20 +283,21 @@ impl<'a> StripUnconfigured<'a> {
// `#[cfg_attr(false, cfg_attr(true, some_attr))]`.
expanded_attrs
.into_iter()
- .flat_map(|item| self.process_cfg_attr(&self.expand_cfg_attr_item(attr, item)))
+ .flat_map(|item| self.process_cfg_attr(&self.expand_cfg_attr_item(cfg_attr, item)))
.collect()
} else {
- expanded_attrs.into_iter().map(|item| self.expand_cfg_attr_item(attr, item)).collect()
+ expanded_attrs
+ .into_iter()
+ .map(|item| self.expand_cfg_attr_item(cfg_attr, item))
+ .collect()
}
}
fn expand_cfg_attr_item(
&self,
- attr: &Attribute,
+ cfg_attr: &Attribute,
(item, item_span): (ast::AttrItem, Span),
) -> Attribute {
- let orig_tokens = attr.get_tokens();
-
// We are taking an attribute of the form `#[cfg_attr(pred, attr)]`
// and producing an attribute of the form `#[attr]`. We
// have captured tokens for `attr` itself, but we need to
@@ -302,11 +306,11 @@ impl<'a> StripUnconfigured<'a> {
// Use the `#` in `#[cfg_attr(pred, attr)]` as the `#` token
// for `attr` when we expand it to `#[attr]`
- let mut orig_trees = orig_tokens.trees();
+ let mut orig_trees = cfg_attr.token_trees().into_iter();
let TokenTree::Token(pound_token @ Token { kind: TokenKind::Pound, .. }, _) =
orig_trees.next().unwrap().clone()
else {
- panic!("Bad tokens for attribute {attr:?}");
+ panic!("Bad tokens for attribute {cfg_attr:?}");
};
// We don't really have a good span to use for the synthesized `[]`
@@ -320,12 +324,12 @@ impl<'a> StripUnconfigured<'a> {
.unwrap_or_else(|| panic!("Missing tokens for {item:?}"))
.to_attr_token_stream(),
);
- let trees = if attr.style == AttrStyle::Inner {
+ let trees = if cfg_attr.style == AttrStyle::Inner {
// For inner attributes, we do the same thing for the `!` in `#![some_attr]`
let TokenTree::Token(bang_token @ Token { kind: TokenKind::Not, .. }, _) =
orig_trees.next().unwrap().clone()
else {
- panic!("Bad tokens for attribute {attr:?}");
+ panic!("Bad tokens for attribute {cfg_attr:?}");
};
vec![
AttrTokenTree::Token(pound_token, Spacing::Joint),
@@ -340,7 +344,7 @@ impl<'a> StripUnconfigured<'a> {
&self.sess.psess.attr_id_generator,
item,
tokens,
- attr.style,
+ cfg_attr.style,
item_span,
);
if attr.has_name(sym::crate_type) {
diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs
index f082cc2b5699..e671c7682391 100644
--- a/compiler/rustc_feature/src/accepted.rs
+++ b/compiler/rustc_feature/src/accepted.rs
@@ -42,6 +42,10 @@ declare_features! (
// feature-group-start: accepted features
// -------------------------------------------------------------------------
+ // Note that the version indicates when it got *stabilized*.
+ // When moving an unstable feature here, set the version number to
+ // `CURRENT RUSTC VERSION` with ` ` replaced by `_`.
+
/// Allows `#[target_feature(...)]` on aarch64 platforms
(accepted, aarch64_target_feature, "1.61.0", Some(44839)),
/// Allows using the `efiapi` ABI.
diff --git a/compiler/rustc_feature/src/lib.rs b/compiler/rustc_feature/src/lib.rs
index bf4293643183..e9d3ce0a0749 100644
--- a/compiler/rustc_feature/src/lib.rs
+++ b/compiler/rustc_feature/src/lib.rs
@@ -31,6 +31,10 @@ use std::num::NonZero;
#[derive(Debug, Clone)]
pub struct Feature {
pub name: Symbol,
+ /// For unstable features: the version the feature was added in.
+ /// For accepted features: the version the feature got stabilized in.
+ /// For removed features we are inconsistent; sometimes this is the
+ /// version it got added, sometimes the version it got removed.
pub since: &'static str,
issue: Option>,
}
diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs
index aea447b2aff1..80a108d2fc87 100644
--- a/compiler/rustc_feature/src/removed.rs
+++ b/compiler/rustc_feature/src/removed.rs
@@ -32,6 +32,12 @@ declare_features! (
// feature-group-start: removed features
// -------------------------------------------------------------------------
+ // Note that the version indicates when it got *removed*.
+ // When moving an unstable feature here, set the version number to
+ // `CURRENT RUSTC VERSION` with ` ` replaced by `_`.
+ // (But not all features below do this properly; many indicate the
+ // version they got originally added in.)
+
/// Allows using the `amdgpu-kernel` ABI.
(removed, abi_amdgpu_kernel, "1.77.0", Some(51575), None),
(removed, advanced_slice_patterns, "1.0.0", Some(62254),
@@ -215,6 +221,9 @@ declare_features! (
/// Permits specifying whether a function should permit unwinding or abort on unwind.
(removed, unwind_attributes, "1.56.0", Some(58760), Some("use the C-unwind ABI instead")),
(removed, visible_private_types, "1.0.0", None, None),
+ /// Allows `extern "wasm" fn`
+ (removed, wasm_abi, "CURRENT_RUSTC_VERSION", Some(83788),
+ Some("non-standard wasm ABI is no longer supported")),
// !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
// Features are listed in alphabetical order. Tidy will fail if you don't keep it this way.
// !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
diff --git a/compiler/rustc_feature/src/unstable.rs b/compiler/rustc_feature/src/unstable.rs
index c05cac155b74..1db3774222a0 100644
--- a/compiler/rustc_feature/src/unstable.rs
+++ b/compiler/rustc_feature/src/unstable.rs
@@ -248,6 +248,8 @@ declare_features! (
(unstable, auto_traits, "1.50.0", Some(13231)),
/// Allows using `box` in patterns (RFC 469).
(unstable, box_patterns, "1.0.0", Some(29641)),
+ /// Allows builtin # foo() syntax
+ (internal, builtin_syntax, "1.71.0", Some(110680)),
/// Allows `#[doc(notable_trait)]`.
/// Renamed from `doc_spotlight`.
(unstable, doc_notable_trait, "1.52.0", Some(45040)),
@@ -361,8 +363,6 @@ declare_features! (
(unstable, async_fn_track_caller, "1.73.0", Some(110011)),
/// Allows `for await` loops.
(unstable, async_for_loop, "1.77.0", Some(118898)),
- /// Allows builtin # foo() syntax
- (unstable, builtin_syntax, "1.71.0", Some(110680)),
/// Allows using C-variadics.
(unstable, c_variadic, "1.34.0", Some(44930)),
/// Allows the use of `#[cfg(overflow_checks)` to check if integer overflow behaviour.
@@ -621,8 +621,6 @@ declare_features! (
(unstable, try_blocks, "1.29.0", Some(31436)),
/// Allows `impl Trait` to be used inside type aliases (RFC 2515).
(unstable, type_alias_impl_trait, "1.38.0", Some(63063)),
- /// Allows the use of type ascription in expressions.
- (unstable, type_ascription, "1.6.0", Some(23416)),
/// Allows creation of instances of a struct by moving fields that have
/// not changed from prior instances of the same struct (RFC #2528)
(unstable, type_changing_struct_update, "1.58.0", Some(86555)),
@@ -640,8 +638,10 @@ declare_features! (
(unstable, unsized_tuple_coercion, "1.20.0", Some(42877)),
/// Allows using the `#[used(linker)]` (or `#[used(compiler)]`) attribute.
(unstable, used_with_arg, "1.60.0", Some(93798)),
- /// Allows `extern "wasm" fn`
- (unstable, wasm_abi, "1.53.0", Some(83788)),
+ /// Allows use of x86 `AMX` target-feature attributes and intrinsics
+ (unstable, x86_amx_intrinsics, "CURRENT_RUSTC_VERSION", Some(126622)),
+ /// Allows use of the `xop` target-feature
+ (unstable, xop_target_feature, "CURRENT_RUSTC_VERSION", Some(127208)),
/// Allows `do yeet` expressions
(unstable, yeet_expr, "1.62.0", Some(96373)),
// !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index d57fad6ba4c2..3bd7b300758c 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -2708,6 +2708,13 @@ impl PreciseCapturingArg<'_> {
PreciseCapturingArg::Param(param) => param.hir_id,
}
}
+
+ pub fn name(self) -> Symbol {
+ match self {
+ PreciseCapturingArg::Lifetime(lt) => lt.ident.name,
+ PreciseCapturingArg::Param(param) => param.ident.name,
+ }
+ }
}
/// We need to have a [`Node`] for the [`HirId`] that we attach the type/const param
@@ -3211,10 +3218,10 @@ impl<'hir> Item<'hir> {
ItemKind::Static(ty, mutbl, body), (ty, *mutbl, *body);
expect_const, (&'hir Ty<'hir>, &'hir Generics<'hir>, BodyId),
- ItemKind::Const(ty, gen, body), (ty, gen, *body);
+ ItemKind::Const(ty, generics, body), (ty, generics, *body);
expect_fn, (&FnSig<'hir>, &'hir Generics<'hir>, BodyId),
- ItemKind::Fn(sig, gen, body), (sig, gen, *body);
+ ItemKind::Fn(sig, generics, body), (sig, generics, *body);
expect_macro, (&ast::MacroDef, MacroKind), ItemKind::Macro(def, mk), (def, *mk);
@@ -3226,25 +3233,25 @@ impl<'hir> Item<'hir> {
expect_global_asm, &'hir InlineAsm<'hir>, ItemKind::GlobalAsm(asm), asm;
expect_ty_alias, (&'hir Ty<'hir>, &'hir Generics<'hir>),
- ItemKind::TyAlias(ty, gen), (ty, gen);
+ ItemKind::TyAlias(ty, generics), (ty, generics);
expect_opaque_ty, &OpaqueTy<'hir>, ItemKind::OpaqueTy(ty), ty;
- expect_enum, (&EnumDef<'hir>, &'hir Generics<'hir>), ItemKind::Enum(def, gen), (def, gen);
+ expect_enum, (&EnumDef<'hir>, &'hir Generics<'hir>), ItemKind::Enum(def, generics), (def, generics);
expect_struct, (&VariantData<'hir>, &'hir Generics<'hir>),
- ItemKind::Struct(data, gen), (data, gen);
+ ItemKind::Struct(data, generics), (data, generics);
expect_union, (&VariantData<'hir>, &'hir Generics<'hir>),
- ItemKind::Union(data, gen), (data, gen);
+ ItemKind::Union(data, generics), (data, generics);
expect_trait,
(IsAuto, Safety, &'hir Generics<'hir>, GenericBounds<'hir>, &'hir [TraitItemRef]),
- ItemKind::Trait(is_auto, safety, gen, bounds, items),
- (*is_auto, *safety, gen, bounds, items);
+ ItemKind::Trait(is_auto, safety, generics, bounds, items),
+ (*is_auto, *safety, generics, bounds, items);
expect_trait_alias, (&'hir Generics<'hir>, GenericBounds<'hir>),
- ItemKind::TraitAlias(gen, bounds), (gen, bounds);
+ ItemKind::TraitAlias(generics, bounds), (generics, bounds);
expect_impl, &'hir Impl<'hir>, ItemKind::Impl(imp), imp;
}
diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs
index 0acc119115a7..e0aad2991632 100644
--- a/compiler/rustc_hir_analysis/src/collect.rs
+++ b/compiler/rustc_hir_analysis/src/collect.rs
@@ -35,7 +35,6 @@ use rustc_middle::ty::{self, AdtKind, Const, IsSuggestable, Ty, TyCtxt, Upcast};
use rustc_middle::{bug, span_bug};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
-use rustc_target::abi::FieldIdx;
use rustc_target::spec::abi;
use rustc_trait_selection::error_reporting::traits::suggestions::NextTypeParamName;
use rustc_trait_selection::infer::InferCtxtExt;
@@ -85,7 +84,6 @@ pub fn provide(providers: &mut Providers) {
coroutine_kind,
coroutine_for_closure,
is_type_alias_impl_trait,
- find_field,
..*providers
};
}
@@ -914,23 +912,6 @@ fn lower_enum_variant_types(tcx: TyCtxt<'_>, def_id: DefId) {
}
}
-fn find_field(tcx: TyCtxt<'_>, (def_id, ident): (DefId, Ident)) -> Option {
- let adt = tcx.adt_def(def_id);
- if adt.is_enum() {
- return None;
- }
-
- adt.non_enum_variant().fields.iter_enumerated().find_map(|(idx, field)| {
- if field.is_unnamed() {
- let field_ty = tcx.type_of(field.did).instantiate_identity();
- let adt_def = field_ty.ty_adt_def().expect("expect Adt for unnamed field");
- tcx.find_field((adt_def.did(), ident)).map(|_| idx)
- } else {
- (field.ident(tcx).normalize_to_macros_2_0() == ident).then_some(idx)
- }
- })
-}
-
#[derive(Clone, Copy)]
struct NestedSpan {
span: Span,
diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs
index f057dbc013fc..035a3429ed76 100644
--- a/compiler/rustc_hir_typeck/src/expr.rs
+++ b/compiler/rustc_hir_typeck/src/expr.rs
@@ -59,6 +59,8 @@ use rustc_trait_selection::infer::InferCtxtExt;
use rustc_trait_selection::traits::ObligationCtxt;
use rustc_trait_selection::traits::{self, ObligationCauseCode};
+use smallvec::SmallVec;
+
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pub fn check_expr_has_type_or_error(
&self,
@@ -2318,6 +2320,44 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
display
}
+ /// Find the position of a field named `ident` in `base_def`, accounting for unnamed fields.
+ /// Return whether such a field has been found. The path to it is stored in `nested_fields`.
+ /// `ident` must have been adjusted beforehand.
+ fn find_adt_field(
+ &self,
+ base_def: ty::AdtDef<'tcx>,
+ ident: Ident,
+ nested_fields: &mut SmallVec<[(FieldIdx, &'tcx ty::FieldDef); 1]>,
+ ) -> bool {
+ // No way to find a field in an enum.
+ if base_def.is_enum() {
+ return false;
+ }
+
+ for (field_idx, field) in base_def.non_enum_variant().fields.iter_enumerated() {
+ if field.is_unnamed() {
+ // We have an unnamed field, recurse into the nested ADT to find `ident`.
+ // If we find it there, return immediately, and `nested_fields` will contain the
+ // correct path.
+ nested_fields.push((field_idx, field));
+
+ let field_ty = self.tcx.type_of(field.did).instantiate_identity();
+ let adt_def = field_ty.ty_adt_def().expect("expect Adt for unnamed field");
+ if self.find_adt_field(adt_def, ident, &mut *nested_fields) {
+ return true;
+ }
+
+ nested_fields.pop();
+ } else if field.ident(self.tcx).normalize_to_macros_2_0() == ident {
+ // We found the field we wanted.
+ nested_fields.push((field_idx, field));
+ return true;
+ }
+ }
+
+ false
+ }
+
// Check field access expressions
fn check_field(
&self,
@@ -2339,44 +2379,44 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let body_hir_id = self.tcx.local_def_id_to_hir_id(self.body_id);
let (ident, def_scope) =
self.tcx.adjust_ident_and_get_scope(field, base_def.did(), body_hir_id);
- let mut adt_def = *base_def;
- let mut last_ty = None;
- let mut nested_fields = Vec::new();
- let mut index = None;
// we don't care to report errors for a struct if the struct itself is tainted
- if let Err(guar) = adt_def.non_enum_variant().has_errors() {
+ if let Err(guar) = base_def.non_enum_variant().has_errors() {
return Ty::new_error(self.tcx(), guar);
}
- while let Some(idx) = self.tcx.find_field((adt_def.did(), ident)) {
- let &mut first_idx = index.get_or_insert(idx);
- let field = &adt_def.non_enum_variant().fields[idx];
- let field_ty = self.field_ty(expr.span, field, args);
- if let Some(ty) = last_ty {
- nested_fields.push((ty, idx));
- }
- if field.ident(self.tcx).normalize_to_macros_2_0() == ident {
- // Save the index of all fields regardless of their visibility in case
- // of error recovery.
- self.write_field_index(expr.hir_id, first_idx, nested_fields);
- let adjustments = self.adjust_steps(&autoderef);
- if field.vis.is_accessible_from(def_scope, self.tcx) {
- self.apply_adjustments(base, adjustments);
- self.register_predicates(autoderef.into_obligations());
- self.tcx.check_stability(
- field.did,
- Some(expr.hir_id),
- expr.span,
- None,
- );
- return field_ty;
- }
- private_candidate = Some((adjustments, base_def.did()));
- break;
+ let mut field_path = SmallVec::new();
+ if self.find_adt_field(*base_def, ident, &mut field_path) {
+ let (first_idx, _) = field_path[0];
+ let (_, last_field) = field_path.last().unwrap();
+
+ // Save the index of all fields regardless of their visibility in case
+ // of error recovery.
+ let nested_fields = field_path[..]
+ .array_windows()
+ .map(|[(_, outer), (inner_idx, _)]| {
+ let outer_ty = self.field_ty(expr.span, outer, args);
+ (outer_ty, *inner_idx)
+ })
+ .collect();
+ self.write_field_index(expr.hir_id, first_idx, nested_fields);
+
+ let adjustments = self.adjust_steps(&autoderef);
+ if last_field.vis.is_accessible_from(def_scope, self.tcx) {
+ self.apply_adjustments(base, adjustments);
+ self.register_predicates(autoderef.into_obligations());
+
+ self.tcx.check_stability(
+ last_field.did,
+ Some(expr.hir_id),
+ expr.span,
+ None,
+ );
+ return self.field_ty(expr.span, last_field, args);
}
- last_ty = Some(field_ty);
- adt_def = field_ty.ty_adt_def().expect("expect Adt for unnamed field");
+
+ // The field is not accessible, fall through to error reporting.
+ private_candidate = Some((adjustments, base_def.did()));
}
}
ty::Tuple(tys) => {
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
index ab0f356ce91f..9fbb01216bbb 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -948,6 +948,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&mut err,
);
+ self.suggest_deref_unwrap_or(
+ &mut err,
+ error_span,
+ callee_ty,
+ call_ident,
+ expected_ty,
+ provided_ty,
+ provided_args[*provided_idx],
+ is_method,
+ );
+
// Call out where the function is defined
self.label_fn_like(
&mut err,
@@ -2554,7 +2565,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.and_then(|node| node.generics())
.into_iter()
.flat_map(|generics| generics.params)
- .find(|gen| &gen.def_id.to_def_id() == res_def_id)
+ .find(|param| ¶m.def_id.to_def_id() == res_def_id)
} else {
None
}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
index 5f897c74482d..b3b4c5a56fbd 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
@@ -466,21 +466,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
borrow_removal_span,
});
return true;
- } else if let Some((deref_ty, _)) =
- self.autoderef(expr.span, found_ty_inner).silence_errors().nth(1)
- && self.can_eq(self.param_env, deref_ty, peeled)
- && error_tys_equate_as_ref
- {
- let sugg = prefix_wrap(".as_deref()");
- err.subdiagnostic(errors::SuggestConvertViaMethod {
- span: expr.span.shrink_to_hi(),
- sugg,
- expected,
- found,
- borrow_removal_span,
- });
- return true;
- } else if let ty::Adt(adt, _) = found_ty_inner.peel_refs().kind()
+ } else if let ty::Ref(_, peeled_found_ty, _) = found_ty_inner.kind()
+ && let ty::Adt(adt, _) = peeled_found_ty.peel_refs().kind()
&& self.tcx.is_lang_item(adt.did(), LangItem::String)
&& peeled.is_str()
// `Result::map`, conversely, does not take ref of the error type.
@@ -496,12 +483,47 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Applicability::MachineApplicable,
);
return true;
+ } else {
+ if !error_tys_equate_as_ref {
+ return false;
+ }
+ let mut steps = self.autoderef(expr.span, found_ty_inner).silence_errors();
+ if let Some((deref_ty, _)) = steps.nth(1)
+ && self.can_eq(self.param_env, deref_ty, peeled)
+ {
+ let sugg = prefix_wrap(".as_deref()");
+ err.subdiagnostic(errors::SuggestConvertViaMethod {
+ span: expr.span.shrink_to_hi(),
+ sugg,
+ expected,
+ found,
+ borrow_removal_span,
+ });
+ return true;
+ }
+ for (deref_ty, n_step) in steps {
+ if self.can_eq(self.param_env, deref_ty, peeled) {
+ let explicit_deref = "*".repeat(n_step);
+ let sugg = prefix_wrap(&format!(".map(|v| &{explicit_deref}v)"));
+ err.subdiagnostic(errors::SuggestConvertViaMethod {
+ span: expr.span.shrink_to_hi(),
+ sugg,
+ expected,
+ found,
+ borrow_removal_span,
+ });
+ return true;
+ }
+ }
}
}
false
}
+ /// If `ty` is `Option`, returns `T, T, None`.
+ /// If `ty` is `Result`, returns `T, T, Some(E, E)`.
+ /// Otherwise, returns `None`.
fn deconstruct_option_or_result(
&self,
found_ty: Ty<'tcx>,
@@ -1407,6 +1429,74 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
true
}
+ // Suggest changing `Option<&Vec>::unwrap_or(&[])` to `Option::map_or(&[], |v| v)`.
+ #[instrument(level = "trace", skip(self, err, provided_expr))]
+ pub(crate) fn suggest_deref_unwrap_or(
+ &self,
+ err: &mut Diag<'_>,
+ error_span: Span,
+ callee_ty: Option>,
+ call_ident: Option,
+ expected_ty: Ty<'tcx>,
+ provided_ty: Ty<'tcx>,
+ provided_expr: &Expr<'tcx>,
+ is_method: bool,
+ ) {
+ if !is_method {
+ return;
+ }
+ let Some(callee_ty) = callee_ty else {
+ return;
+ };
+ let ty::Adt(callee_adt, _) = callee_ty.peel_refs().kind() else {
+ return;
+ };
+ let adt_name = if self.tcx.is_diagnostic_item(sym::Option, callee_adt.did()) {
+ "Option"
+ } else if self.tcx.is_diagnostic_item(sym::Result, callee_adt.did()) {
+ "Result"
+ } else {
+ return;
+ };
+
+ let Some(call_ident) = call_ident else {
+ return;
+ };
+ if call_ident.name != sym::unwrap_or {
+ return;
+ }
+
+ let ty::Ref(_, peeled, _mutability) = provided_ty.kind() else {
+ return;
+ };
+
+ // NOTE: Can we reuse `suggest_deref_or_ref`?
+
+ // Create a dummy type `&[_]` so that both &[] and `&Vec` can coerce to it.
+ let dummy_ty = if let ty::Array(elem_ty, size) = peeled.kind()
+ && let ty::Infer(_) = elem_ty.kind()
+ && size.try_eval_target_usize(self.tcx, self.param_env) == Some(0)
+ {
+ let slice = Ty::new_slice(self.tcx, *elem_ty);
+ Ty::new_imm_ref(self.tcx, self.tcx.lifetimes.re_static, slice)
+ } else {
+ provided_ty
+ };
+
+ if !self.can_coerce(expected_ty, dummy_ty) {
+ return;
+ }
+ let msg = format!("use `{adt_name}::map_or` to deref inner value of `{adt_name}`");
+ err.multipart_suggestion_verbose(
+ msg,
+ vec![
+ (call_ident.span, "map_or".to_owned()),
+ (provided_expr.span.shrink_to_hi(), ", |v| v".to_owned()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+
/// Suggest wrapping the block in square brackets instead of curly braces
/// in case the block was mistaken array syntax, e.g. `{ 1 }` -> `[ 1 ]`.
pub(crate) fn suggest_block_to_brackets(
diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs
index a87ee7b45548..bdbdcee6446d 100644
--- a/compiler/rustc_hir_typeck/src/lib.rs
+++ b/compiler/rustc_hir_typeck/src/lib.rs
@@ -1,6 +1,7 @@
// tidy-alphabetical-start
#![allow(rustc::diagnostic_outside_of_impl)]
#![allow(rustc::untranslatable_diagnostic)]
+#![feature(array_windows)]
#![feature(box_patterns)]
#![feature(control_flow_enum)]
#![feature(if_let_guard)]
diff --git a/compiler/rustc_infer/messages.ftl b/compiler/rustc_infer/messages.ftl
index fbe8d31370cc..7a5e71599203 100644
--- a/compiler/rustc_infer/messages.ftl
+++ b/compiler/rustc_infer/messages.ftl
@@ -221,6 +221,10 @@ infer_opaque_hidden_type =
infer_outlives_bound = lifetime of the source pointer does not outlive lifetime bound of the object type
infer_outlives_content = lifetime of reference outlives lifetime of borrowed content...
+
+infer_precise_capturing_existing = add `{$new_lifetime}` to the `use<...>` bound to explicitly capture it
+infer_precise_capturing_new = add a `use<...>` bound to explicitly capture `{$new_lifetime}`
+
infer_prlf_defined_with_sub = the lifetime `{$sub_symbol}` defined here...
infer_prlf_defined_without_sub = the lifetime defined here...
infer_prlf_known_limitation = this is a known limitation that will be removed in the future (see issue #100013 for more information)
diff --git a/compiler/rustc_infer/src/errors/mod.rs b/compiler/rustc_infer/src/errors/mod.rs
index a801001eaf98..ce1b0f86d034 100644
--- a/compiler/rustc_infer/src/errors/mod.rs
+++ b/compiler/rustc_infer/src/errors/mod.rs
@@ -1581,3 +1581,32 @@ pub enum ObligationCauseFailureCode {
subdiags: Vec,
},
}
+
+#[derive(Subdiagnostic)]
+pub enum AddPreciseCapturing {
+ #[suggestion(
+ infer_precise_capturing_new,
+ style = "verbose",
+ code = " + use<{concatenated_bounds}>",
+ applicability = "machine-applicable"
+ )]
+ New {
+ #[primary_span]
+ span: Span,
+ new_lifetime: Symbol,
+ concatenated_bounds: String,
+ },
+ #[suggestion(
+ infer_precise_capturing_existing,
+ style = "verbose",
+ code = "{pre}{new_lifetime}{post}",
+ applicability = "machine-applicable"
+ )]
+ Existing {
+ #[primary_span]
+ span: Span,
+ new_lifetime: Symbol,
+ pre: &'static str,
+ post: &'static str,
+ },
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/region.rs b/compiler/rustc_infer/src/infer/error_reporting/region.rs
index 5a465f46e47d..191cb23184da 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/region.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/region.rs
@@ -1,5 +1,6 @@
use std::iter;
+use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::{
struct_span_code_err, Applicability, Diag, Subdiagnostic, E0309, E0310, E0311, E0495,
};
@@ -12,7 +13,7 @@ use rustc_middle::traits::ObligationCauseCode;
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::{self, IsSuggestable, Region, Ty, TyCtxt, TypeVisitableExt as _};
use rustc_span::symbol::kw;
-use rustc_span::{ErrorGuaranteed, Span};
+use rustc_span::{BytePos, ErrorGuaranteed, Span, Symbol};
use rustc_type_ir::Upcast as _;
use super::nice_region_error::find_anon_type;
@@ -1201,17 +1202,21 @@ pub fn unexpected_hidden_region_diagnostic<'a, 'tcx>(
"",
);
if let Some(reg_info) = tcx.is_suitable_region(generic_param_scope, hidden_region) {
- let fn_returns = tcx.return_type_impl_or_dyn_traits(reg_info.def_id);
- nice_region_error::suggest_new_region_bound(
- tcx,
- &mut err,
- fn_returns,
- hidden_region.to_string(),
- None,
- format!("captures `{hidden_region}`"),
- None,
- Some(reg_info.def_id),
- )
+ if infcx.tcx.features().precise_capturing {
+ suggest_precise_capturing(tcx, opaque_ty_key.def_id, hidden_region, &mut err);
+ } else {
+ let fn_returns = tcx.return_type_impl_or_dyn_traits(reg_info.def_id);
+ nice_region_error::suggest_new_region_bound(
+ tcx,
+ &mut err,
+ fn_returns,
+ hidden_region.to_string(),
+ None,
+ format!("captures `{hidden_region}`"),
+ None,
+ Some(reg_info.def_id),
+ )
+ }
}
}
ty::RePlaceholder(_) => {
@@ -1257,3 +1262,95 @@ pub fn unexpected_hidden_region_diagnostic<'a, 'tcx>(
err
}
+
+fn suggest_precise_capturing<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ opaque_def_id: LocalDefId,
+ captured_lifetime: ty::Region<'tcx>,
+ diag: &mut Diag<'_>,
+) {
+ let hir::OpaqueTy { bounds, .. } =
+ tcx.hir_node_by_def_id(opaque_def_id).expect_item().expect_opaque_ty();
+
+ let new_lifetime = Symbol::intern(&captured_lifetime.to_string());
+
+ if let Some((args, span)) = bounds.iter().find_map(|bound| match bound {
+ hir::GenericBound::Use(args, span) => Some((args, span)),
+ _ => None,
+ }) {
+ let last_lifetime_span = args.iter().rev().find_map(|arg| match arg {
+ hir::PreciseCapturingArg::Lifetime(lt) => Some(lt.ident.span),
+ _ => None,
+ });
+
+ let first_param_span = args.iter().find_map(|arg| match arg {
+ hir::PreciseCapturingArg::Param(p) => Some(p.ident.span),
+ _ => None,
+ });
+
+ let (span, pre, post) = if let Some(last_lifetime_span) = last_lifetime_span {
+ (last_lifetime_span.shrink_to_hi(), ", ", "")
+ } else if let Some(first_param_span) = first_param_span {
+ (first_param_span.shrink_to_lo(), "", ", ")
+ } else {
+ // If we have no args, then have `use<>` and need to fall back to using
+ // span math. This sucks, but should be reliable due to the construction
+ // of the `use<>` span.
+ (span.with_hi(span.hi() - BytePos(1)).shrink_to_hi(), "", "")
+ };
+
+ diag.subdiagnostic(errors::AddPreciseCapturing::Existing { span, new_lifetime, pre, post });
+ } else {
+ let mut captured_lifetimes = FxIndexSet::default();
+ let mut captured_non_lifetimes = FxIndexSet::default();
+
+ let variances = tcx.variances_of(opaque_def_id);
+ let mut generics = tcx.generics_of(opaque_def_id);
+ loop {
+ for param in &generics.own_params {
+ if variances[param.index as usize] == ty::Bivariant {
+ continue;
+ }
+
+ match param.kind {
+ ty::GenericParamDefKind::Lifetime => {
+ captured_lifetimes.insert(param.name);
+ }
+ ty::GenericParamDefKind::Type { synthetic: true, .. } => {
+ // FIXME: We can't provide a good suggestion for
+ // `use<...>` if we have an APIT. Bail for now.
+ return;
+ }
+ ty::GenericParamDefKind::Type { .. }
+ | ty::GenericParamDefKind::Const { .. } => {
+ captured_non_lifetimes.insert(param.name);
+ }
+ }
+ }
+
+ if let Some(parent) = generics.parent {
+ generics = tcx.generics_of(parent);
+ } else {
+ break;
+ }
+ }
+
+ if !captured_lifetimes.insert(new_lifetime) {
+ // Uh, strange. This lifetime appears to already be captured...
+ return;
+ }
+
+ let concatenated_bounds = captured_lifetimes
+ .into_iter()
+ .chain(captured_non_lifetimes)
+ .map(|sym| sym.to_string())
+ .collect::<Vec<_>>()
+ .join(", ");
+
+ diag.subdiagnostic(errors::AddPreciseCapturing::New {
+ span: tcx.def_span(opaque_def_id).shrink_to_hi(),
+ new_lifetime,
+ concatenated_bounds,
+ });
+ }
+}
diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs
index e2ba75dfd190..7d7a6a08bee4 100644
--- a/compiler/rustc_interface/src/tests.rs
+++ b/compiler/rustc_interface/src/tests.rs
@@ -691,6 +691,7 @@ fn test_unstable_options_tracking_hash() {
untracked!(dump_mir, Some(String::from("abc")));
untracked!(dump_mir_dataflow, true);
untracked!(dump_mir_dir, String::from("abc"));
+ untracked!(dump_mir_exclude_alloc_bytes, true);
untracked!(dump_mir_exclude_pass_number, true);
untracked!(dump_mir_graphviz, true);
untracked!(dump_mono_stats, SwitchWithOptPath::Enabled(Some("mono-items-dir/".into())));
diff --git a/compiler/rustc_lint/messages.ftl b/compiler/rustc_lint/messages.ftl
index 4f2d59b4c663..de04d882f516 100644
--- a/compiler/rustc_lint/messages.ftl
+++ b/compiler/rustc_lint/messages.ftl
@@ -52,10 +52,6 @@ lint_builtin_allow_internal_unsafe =
lint_builtin_anonymous_params = anonymous parameters are deprecated and will be removed in the next edition
.suggestion = try naming the parameter or explicitly ignoring it
-lint_builtin_asm_labels = avoid using named labels in inline assembly
- .help = only local labels of the form `<number>:` should be used in inline asm
- .note = see the asm section of Rust By Example <https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html#labels> for more information
-
lint_builtin_clashing_extern_diff_name = `{$this}` redeclares `{$orig}` with a different signature
.previous_decl_label = `{$orig}` previously declared here
.mismatch_label = this signature doesn't match the previous declaration
@@ -163,6 +159,8 @@ lint_builtin_unreachable_pub = unreachable `pub` {$what}
lint_builtin_unsafe_block = usage of an `unsafe` block
+lint_builtin_unsafe_extern_block = usage of an `unsafe extern` block
+
lint_builtin_unsafe_impl = implementation of an `unsafe` trait
lint_builtin_unsafe_trait = declaration of an `unsafe` trait
@@ -403,6 +401,19 @@ lint_incomplete_include =
lint_inner_macro_attribute_unstable = inner macro attributes are unstable
+lint_invalid_asm_label_binary = avoid using labels containing only the digits `0` and `1` in inline assembly
+ .label = use a different label that doesn't start with `0` or `1`
+ .note = an LLVM bug makes these labels ambiguous with a binary literal number
+ .note = see <https://bugs.llvm.org/show_bug.cgi?id=36144> for more information
+
+lint_invalid_asm_label_format_arg = avoid using named labels in inline assembly
+ .help = only local labels of the form `<number>:` should be used in inline asm
+ .note1 = format arguments may expand to a non-numeric value
+ .note2 = see the asm section of Rust By Example <https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html#labels> for more information
+lint_invalid_asm_label_named = avoid using named labels in inline assembly
+ .help = only local labels of the form `<number>:` should be used in inline asm
+ .note = see the asm section of Rust By Example <https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html#labels> for more information
+lint_invalid_asm_label_no_span = the label may be declared in the expansion of a macro
lint_invalid_crate_type_value = invalid `crate_type` value
.suggestion = did you mean
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index 79c8046f9b74..485c214ac9de 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -30,13 +30,13 @@ use crate::{
BuiltinExplicitOutlivesSuggestion, BuiltinFeatureIssueNote, BuiltinIncompleteFeatures,
BuiltinIncompleteFeaturesHelp, BuiltinInternalFeatures, BuiltinKeywordIdents,
BuiltinMissingCopyImpl, BuiltinMissingDebugImpl, BuiltinMissingDoc,
- BuiltinMutablesTransmutes, BuiltinNamedAsmLabel, BuiltinNoMangleGeneric,
- BuiltinNonShorthandFieldPatterns, BuiltinSpecialModuleNameUsed, BuiltinTrivialBounds,
- BuiltinTypeAliasGenericBounds, BuiltinTypeAliasGenericBoundsSuggestion,
- BuiltinTypeAliasWhereClause, BuiltinUngatedAsyncFnTrackCaller, BuiltinUnpermittedTypeInit,
+ BuiltinMutablesTransmutes, BuiltinNoMangleGeneric, BuiltinNonShorthandFieldPatterns,
+ BuiltinSpecialModuleNameUsed, BuiltinTrivialBounds, BuiltinTypeAliasGenericBounds,
+ BuiltinTypeAliasGenericBoundsSuggestion, BuiltinTypeAliasWhereClause,
+ BuiltinUngatedAsyncFnTrackCaller, BuiltinUnpermittedTypeInit,
BuiltinUnpermittedTypeInitSub, BuiltinUnreachablePub, BuiltinUnsafe,
BuiltinUnstableFeatures, BuiltinUnusedDocComment, BuiltinUnusedDocCommentSub,
- BuiltinWhileTrue, SuggestChangingAssocTypes,
+ BuiltinWhileTrue, InvalidAsmLabel, SuggestChangingAssocTypes,
},
EarlyContext, EarlyLintPass, LateContext, LateLintPass, Level, LintContext,
};
@@ -45,7 +45,7 @@ use rustc_ast::tokenstream::{TokenStream, TokenTree};
use rustc_ast::visit::{FnCtxt, FnKind};
use rustc_ast::{self as ast, *};
use rustc_ast_pretty::pprust::{self, expr_to_string};
-use rustc_errors::{Applicability, LintDiagnostic, MultiSpan};
+use rustc_errors::{Applicability, LintDiagnostic};
use rustc_feature::{deprecated_attributes, AttributeGate, BuiltinAttribute, GateIssue, Stability};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
@@ -69,7 +69,6 @@ use rustc_target::abi::Abi;
use rustc_trait_selection::infer::{InferCtxtExt, TyCtxtInferExt};
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
use rustc_trait_selection::traits::{self, misc::type_allowed_to_implement_copy};
-use tracing::debug;
use crate::nonstandard_style::{method_context, MethodLateContext};
@@ -326,6 +325,12 @@ impl EarlyLintPass for UnsafeCode {
self.report_unsafe(cx, it.span, BuiltinUnsafe::GlobalAsm);
}
+ ast::ItemKind::ForeignMod(ForeignMod { safety, .. }) => {
+ if let Safety::Unsafe(_) = safety {
+ self.report_unsafe(cx, it.span, BuiltinUnsafe::UnsafeExternBlock);
+ }
+ }
+
_ => {}
}
}
@@ -2728,10 +2733,52 @@ declare_lint! {
"named labels in inline assembly",
}
-declare_lint_pass!(NamedAsmLabels => [NAMED_ASM_LABELS]);
+declare_lint! {
+ /// The `binary_asm_labels` lint detects the use of numeric labels containing only binary
+ /// digits in the inline `asm!` macro.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// # #![feature(asm_experimental_arch)]
+ /// use std::arch::asm;
+ ///
+ /// fn main() {
+ /// unsafe {
+ /// asm!("0: jmp 0b");
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A [LLVM bug] causes this code to fail to compile because it interprets the `0b` as a binary
+ /// literal instead of a reference to the previous local label `0`. Note that even though the
+ /// bug is marked as fixed, it only fixes a specific usage of intel syntax within standalone
+ /// files, not inline assembly. To work around this bug, don't use labels that could be
+ /// confused with a binary literal.
+ ///
+ /// See the explanation in [Rust By Example] for more details.
+ ///
+ /// [LLVM bug]: https://bugs.llvm.org/show_bug.cgi?id=36144
+ /// [Rust By Example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html#labels
+ pub BINARY_ASM_LABELS,
+ Deny,
+ "labels in inline assembly containing only 0 or 1 digits",
+}
-impl<'tcx> LateLintPass<'tcx> for NamedAsmLabels {
- #[allow(rustc::diagnostic_outside_of_impl)]
+declare_lint_pass!(AsmLabels => [NAMED_ASM_LABELS, BINARY_ASM_LABELS]);
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum AsmLabelKind {
+ Named,
+ FormatArg,
+ Binary,
+}
+
+impl<'tcx> LateLintPass<'tcx> for AsmLabels {
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'tcx>) {
if let hir::Expr {
kind: hir::ExprKind::InlineAsm(hir::InlineAsm { template_strs, options, .. }),
@@ -2759,7 +2806,8 @@ impl<'tcx> LateLintPass<'tcx> for NamedAsmLabels {
None
};
- let mut found_labels = Vec::new();
+ // diagnostics are emitted per-template, so this is created here as opposed to the outer loop
+ let mut spans = Vec::new();
// A semicolon might not actually be specified as a separator for all targets, but
// it seems like LLVM accepts it always.
@@ -2782,16 +2830,21 @@ impl<'tcx> LateLintPass<'tcx> for NamedAsmLabels {
// Whether a { bracket has been seen and its } hasn't been found yet.
let mut in_bracket = false;
+ let mut label_kind = AsmLabelKind::Named;
- // A label starts with an ASCII alphabetic character or . or _
// A label can also start with a format arg, if it's not a raw asm block.
if !raw && start == '{' {
in_bracket = true;
+ label_kind = AsmLabelKind::FormatArg;
+ } else if matches!(start, '0' | '1') {
+ // Binary labels have only the characters `0` or `1`.
+ label_kind = AsmLabelKind::Binary;
} else if !(start.is_ascii_alphabetic() || matches!(start, '.' | '_')) {
+ // Named labels start with ASCII letters, `.` or `_`.
+ // anything else is not a label
break 'label_loop;
}
- // Labels continue with ASCII alphanumeric characters, _, or $
for c in chars {
// Inside a template format arg, any character is permitted for the
// puproses of label detection because we assume that it can be
@@ -2812,8 +2865,18 @@ impl<'tcx> LateLintPass<'tcx> for NamedAsmLabels {
} else if !raw && c == '{' {
// Start of a format arg.
in_bracket = true;
+ label_kind = AsmLabelKind::FormatArg;
} else {
- if !(c.is_ascii_alphanumeric() || matches!(c, '_' | '$')) {
+ let can_continue = match label_kind {
+ // Format arg labels are considered to be named labels for the purposes
+ // of continuing outside of their {} pair.
+ AsmLabelKind::Named | AsmLabelKind::FormatArg => {
+ c.is_ascii_alphanumeric() || matches!(c, '_' | '$')
+ }
+ AsmLabelKind::Binary => matches!(c, '0' | '1'),
+ };
+
+ if !can_continue {
// The potential label had an invalid character inside it, it
// cannot be a label.
break 'label_loop;
@@ -2821,25 +2884,41 @@ impl<'tcx> LateLintPass<'tcx> for NamedAsmLabels {
}
}
- // If all characters passed the label checks, this is likely a label.
- found_labels.push(possible_label);
+ // If all characters passed the label checks, this is a label.
+ spans.push((find_label_span(possible_label), label_kind));
start_idx = idx + 1;
}
}
- debug!("NamedAsmLabels::check_expr(): found_labels: {:#?}", &found_labels);
-
- if found_labels.len() > 0 {
- let spans = found_labels
- .into_iter()
- .filter_map(|label| find_label_span(label))
- .collect::<Vec<Span>>();
- // If there were labels but we couldn't find a span, combine the warnings and
- // use the template span.
- let target_spans: MultiSpan =
- if spans.len() > 0 { spans.into() } else { (*template_span).into() };
-
- cx.emit_span_lint(NAMED_ASM_LABELS, target_spans, BuiltinNamedAsmLabel);
+ for (span, label_kind) in spans {
+ let missing_precise_span = span.is_none();
+ let span = span.unwrap_or(*template_span);
+ match label_kind {
+ AsmLabelKind::Named => {
+ cx.emit_span_lint(
+ NAMED_ASM_LABELS,
+ span,
+ InvalidAsmLabel::Named { missing_precise_span },
+ );
+ }
+ AsmLabelKind::FormatArg => {
+ cx.emit_span_lint(
+ NAMED_ASM_LABELS,
+ span,
+ InvalidAsmLabel::FormatArg { missing_precise_span },
+ );
+ }
+ AsmLabelKind::Binary => {
+ // the binary asm issue only occurs when using intel syntax
+ if !options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ cx.emit_span_lint(
+ BINARY_ASM_LABELS,
+ span,
+ InvalidAsmLabel::Binary { missing_precise_span, span },
+ )
+ }
+ }
+ };
}
}
}
diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs
index b6927cf60b69..8be8996e4c8f 100644
--- a/compiler/rustc_lint/src/lib.rs
+++ b/compiler/rustc_lint/src/lib.rs
@@ -225,7 +225,7 @@ late_lint_methods!(
NoopMethodCall: NoopMethodCall,
EnumIntrinsicsNonEnums: EnumIntrinsicsNonEnums,
InvalidAtomicOrdering: InvalidAtomicOrdering,
- NamedAsmLabels: NamedAsmLabels,
+ AsmLabels: AsmLabels,
OpaqueHiddenInferredBound: OpaqueHiddenInferredBound,
MultipleSupertraitUpcastable: MultipleSupertraitUpcastable,
MapUnitFn: MapUnitFn,
diff --git a/compiler/rustc_lint/src/lints.rs b/compiler/rustc_lint/src/lints.rs
index 54c73710eca6..308bb73f4cea 100644
--- a/compiler/rustc_lint/src/lints.rs
+++ b/compiler/rustc_lint/src/lints.rs
@@ -81,6 +81,8 @@ pub enum BuiltinUnsafe {
AllowInternalUnsafe,
#[diag(lint_builtin_unsafe_block)]
UnsafeBlock,
+ #[diag(lint_builtin_unsafe_extern_block)]
+ UnsafeExternBlock,
#[diag(lint_builtin_unsafe_trait)]
UnsafeTrait,
#[diag(lint_builtin_unsafe_impl)]
@@ -2047,10 +2049,32 @@ pub struct UnitBindingsDiag {
}
#[derive(LintDiagnostic)]
-#[diag(lint_builtin_asm_labels)]
-#[help]
-#[note]
-pub struct BuiltinNamedAsmLabel;
+pub enum InvalidAsmLabel {
+ #[diag(lint_invalid_asm_label_named)]
+ #[help]
+ #[note]
+ Named {
+ #[note(lint_invalid_asm_label_no_span)]
+ missing_precise_span: bool,
+ },
+ #[diag(lint_invalid_asm_label_format_arg)]
+ #[help]
+ #[note(lint_note1)]
+ #[note(lint_note2)]
+ FormatArg {
+ #[note(lint_invalid_asm_label_no_span)]
+ missing_precise_span: bool,
+ },
+ #[diag(lint_invalid_asm_label_binary)]
+ #[note]
+ Binary {
+ #[note(lint_invalid_asm_label_no_span)]
+ missing_precise_span: bool,
+ // hack to get a label on the whole span, must match the emitted span
+ #[label]
+ span: Span,
+ },
+}
#[derive(Subdiagnostic)]
pub enum UnexpectedCfgCargoHelp {
diff --git a/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
index 5ee73dbfdc65..fdb71ad41a75 100644
--- a/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
+++ b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
@@ -5,8 +5,7 @@ use rustc_middle::ty::print::{PrintTraitPredicateExt as _, TraitPredPrintModifie
use rustc_middle::ty::{self, fold::BottomUpFolder, Ty, TypeFoldable};
use rustc_session::{declare_lint, declare_lint_pass};
use rustc_span::{symbol::kw, Span};
-use rustc_trait_selection::traits;
-use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+use rustc_trait_selection::traits::{self, ObligationCtxt};
use crate::{LateContext, LateLintPass, LintContext};
@@ -130,24 +129,26 @@ impl<'tcx> LateLintPass<'tcx> for OpaqueHiddenInferredBound {
.iter_instantiated_copied(cx.tcx, proj.projection_term.args)
{
let assoc_pred = assoc_pred.fold_with(proj_replacer);
- let Ok(assoc_pred) = traits::fully_normalize(
- infcx,
- traits::ObligationCause::dummy(),
- cx.param_env,
- assoc_pred,
- ) else {
- continue;
- };
- // If that predicate doesn't hold modulo regions (but passed during type-check),
- // then we must've taken advantage of the hack in `project_and_unify_types` where
- // we replace opaques with inference vars. Emit a warning!
- if !infcx.predicate_must_hold_modulo_regions(&traits::Obligation::new(
+ let ocx = ObligationCtxt::new(infcx);
+ let assoc_pred =
+ ocx.normalize(&traits::ObligationCause::dummy(), cx.param_env, assoc_pred);
+ if !ocx.select_all_or_error().is_empty() {
+ // Can't normalize for some reason...?
+ continue;
+ }
+
+ ocx.register_obligation(traits::Obligation::new(
cx.tcx,
traits::ObligationCause::dummy(),
cx.param_env,
assoc_pred,
- )) {
+ ));
+
+ // If that predicate doesn't hold modulo regions (but passed during type-check),
+ // then we must've taken advantage of the hack in `project_and_unify_types` where
+ // we replace opaques with inference vars. Emit a warning!
+ if !ocx.select_all_or_error().is_empty() {
// If it's a trait bound and an opaque that doesn't satisfy it,
// then we can emit a suggestion to add the bound.
let add_bound = match (proj_term.kind(), assoc_pred.kind().skip_binder()) {
diff --git a/compiler/rustc_llvm/build.rs b/compiler/rustc_llvm/build.rs
index 3aa852c83045..4c1f78e6bee3 100644
--- a/compiler/rustc_llvm/build.rs
+++ b/compiler/rustc_llvm/build.rs
@@ -197,9 +197,8 @@ fn main() {
cfg.define("LLVM_RUSTLLVM", None);
}
- if tracked_env_var_os("LLVM_NDEBUG").is_some() {
+ if tracked_env_var_os("LLVM_ASSERTIONS").is_none() {
cfg.define("NDEBUG", None);
- cfg.debug(false);
}
rerun_if_changed_anything_in_dir(Path::new("llvm-wrapper"));
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index b6790b7df500..14757b27a375 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -1137,20 +1137,15 @@ LLVMRustDIBuilderGetOrCreateArray(LLVMRustDIBuilderRef Builder,
Builder->getOrCreateArray(ArrayRef(DataValue, Count)).get());
}
-extern "C" LLVMValueRef LLVMRustDIBuilderInsertDeclareAtEnd(
+extern "C" void LLVMRustDIBuilderInsertDeclareAtEnd(
LLVMRustDIBuilderRef Builder, LLVMValueRef V, LLVMMetadataRef VarInfo,
uint64_t *AddrOps, unsigned AddrOpsCount, LLVMMetadataRef DL,
LLVMBasicBlockRef InsertAtEnd) {
- auto Result = Builder->insertDeclare(
- unwrap(V), unwrap(VarInfo),
- Builder->createExpression(
- llvm::ArrayRef<uint64_t>(AddrOps, AddrOpsCount)),
- DebugLoc(cast<MDNode>(unwrap(DL))), unwrap(InsertAtEnd));
-#if LLVM_VERSION_GE(19, 0)
- return wrap(Result.get());
-#else
- return wrap(Result);
-#endif
+ Builder->insertDeclare(unwrap(V), unwrap(VarInfo),
+ Builder->createExpression(
+ llvm::ArrayRef<uint64_t>(AddrOps, AddrOpsCount)),
+ DebugLoc(cast<MDNode>(unwrap(DL))),
+ unwrap(InsertAtEnd));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateEnumerator(
diff --git a/compiler/rustc_macros/src/diagnostics/diagnostic.rs b/compiler/rustc_macros/src/diagnostics/diagnostic.rs
index a3abbdcf18ca..2743660ab891 100644
--- a/compiler/rustc_macros/src/diagnostics/diagnostic.rs
+++ b/compiler/rustc_macros/src/diagnostics/diagnostic.rs
@@ -71,6 +71,8 @@ impl<'a> DiagnosticDerive<'a> {
});
// A lifetime of `'a` causes conflicts, but `_sess` is fine.
+ // FIXME(edition_2024): Fix the `keyword_idents_2024` lint to not trigger here?
+ #[allow(keyword_idents_2024)]
let mut imp = structure.gen_impl(quote! {
gen impl<'_sess, G> rustc_errors::Diagnostic<'_sess, G> for @Self
where G: rustc_errors::EmissionGuarantee
@@ -148,6 +150,8 @@ impl<'a> LintDiagnosticDerive<'a> {
}
});
+ // FIXME(edition_2024): Fix the `keyword_idents_2024` lint to not trigger here?
+ #[allow(keyword_idents_2024)]
let mut imp = structure.gen_impl(quote! {
gen impl<'__a> rustc_errors::LintDiagnostic<'__a, ()> for @Self {
#[track_caller]
diff --git a/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs b/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
index 46bd80c2df64..f93d89d6c0f0 100644
--- a/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
+++ b/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
@@ -269,6 +269,7 @@ impl DiagnosticDeriveVariantBuilder {
let field_binding = &binding_info.binding;
let inner_ty = FieldInnerTy::from_type(&field.ty);
+ let mut seen_label = false;
field
.attrs
@@ -280,6 +281,14 @@ impl DiagnosticDeriveVariantBuilder {
}
let name = attr.path().segments.last().unwrap().ident.to_string();
+
+ if name == "primary_span" && seen_label {
+ span_err(attr.span().unwrap(), format!("`#[primary_span]` must be placed before labels, since it overwrites the span of the diagnostic")).emit();
+ }
+ if name == "label" {
+ seen_label = true;
+ }
+
let needs_clone =
name == "primary_span" && matches!(inner_ty, FieldInnerTy::Vec(_));
let (binding, needs_destructure) = if needs_clone {
diff --git a/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs b/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
index 69014f39925a..7f090f5ebc16 100644
--- a/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
+++ b/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
@@ -86,6 +86,9 @@ impl SubdiagnosticDerive {
let diag = &self.diag;
let f = &self.f;
+
+ // FIXME(edition_2024): Fix the `keyword_idents_2024` lint to not trigger here?
+ #[allow(keyword_idents_2024)]
let ret = structure.gen_impl(quote! {
gen impl rustc_errors::Subdiagnostic for @Self {
fn add_to_diag_with<__G, __F>(
@@ -100,6 +103,7 @@ impl SubdiagnosticDerive {
}
}
});
+
ret
}
}
diff --git a/compiler/rustc_middle/src/mir/coverage.rs b/compiler/rustc_middle/src/mir/coverage.rs
index beaaadd497d3..2a593340849e 100644
--- a/compiler/rustc_middle/src/mir/coverage.rs
+++ b/compiler/rustc_middle/src/mir/coverage.rs
@@ -220,19 +220,6 @@ pub enum MappingKind {
}
impl MappingKind {
- /// Iterator over all coverage terms in this mapping kind.
- pub fn terms(&self) -> impl Iterator<Item = CovTerm> {
- let zero = || None.into_iter().chain(None);
- let one = |a| Some(a).into_iter().chain(None);
- let two = |a, b| Some(a).into_iter().chain(Some(b));
- match *self {
- Self::Code(term) => one(term),
- Self::Branch { true_term, false_term } => two(true_term, false_term),
- Self::MCDCBranch { true_term, false_term, .. } => two(true_term, false_term),
- Self::MCDCDecision(_) => zero(),
- }
- }
-
/// Returns a copy of this mapping kind, in which all coverage terms have
/// been replaced with ones returned by the given function.
pub fn map_terms(&self, map_fn: impl Fn(CovTerm) -> CovTerm) -> Self {
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index 4e95e600b5ab..bdd1eb11a38e 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -393,7 +393,6 @@ pub(crate) struct AllocMap<'tcx> {
 alloc_map: FxHashMap<AllocId, GlobalAlloc<'tcx>>,
/// Used to ensure that statics and functions only get one associated `AllocId`.
- /// Should never contain a `GlobalAlloc::Memory`!
//
// FIXME: Should we just have two separate dedup maps for statics and functions each?
 dedup: FxHashMap<GlobalAlloc<'tcx>, AllocId>,
@@ -433,13 +432,13 @@ impl<'tcx> TyCtxt<'tcx> {
}
/// Reserves a new ID *if* this allocation has not been dedup-reserved before.
- /// Should only be used for "symbolic" allocations (function pointers, vtables, statics), we
- /// don't want to dedup IDs for "real" memory!
+ /// Should not be used for mutable memory.
fn reserve_and_set_dedup(self, alloc: GlobalAlloc<'tcx>) -> AllocId {
let mut alloc_map = self.alloc_map.lock();
- match alloc {
- GlobalAlloc::Function { .. } | GlobalAlloc::Static(..) | GlobalAlloc::VTable(..) => {}
- GlobalAlloc::Memory(..) => bug!("Trying to dedup-reserve memory with real data!"),
+ if let GlobalAlloc::Memory(mem) = alloc {
+ if mem.inner().mutability.is_mut() {
+ bug!("trying to dedup-reserve mutable memory");
+ }
}
if let Some(&alloc_id) = alloc_map.dedup.get(&alloc) {
return alloc_id;
@@ -451,6 +450,12 @@ impl<'tcx> TyCtxt<'tcx> {
id
}
+ /// Generates an `AllocId` for a memory allocation. If the exact same memory has been
+ /// allocated before, this will return the same `AllocId`.
+ pub fn reserve_and_set_memory_dedup(self, mem: ConstAllocation<'tcx>) -> AllocId {
+ self.reserve_and_set_dedup(GlobalAlloc::Memory(mem))
+ }
+
/// Generates an `AllocId` for a static or return a cached one in case this function has been
/// called on the same static before.
pub fn reserve_and_set_static_alloc(self, static_id: DefId) -> AllocId {
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 82625ae3d474..223249952dc7 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -1545,6 +1545,9 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> std::fmt::Display
// We are done.
return write!(w, " {{}}");
}
+ if tcx.sess.opts.unstable_opts.dump_mir_exclude_alloc_bytes {
+ return write!(w, " {{ .. }}");
+ }
// Write allocation bytes.
writeln!(w, " {{")?;
write_allocation_bytes(tcx, alloc, w, " ")?;
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index 0d3c419748b7..0031ded24406 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -1016,14 +1016,14 @@ macro_rules! extra_body_methods {
macro_rules! super_body {
($self:ident, $body:ident, $($mutability:ident, $invalidate:tt)?) => {
let span = $body.span;
- if let Some(gen) = &$($mutability)? $body.coroutine {
- if let Some(yield_ty) = $(& $mutability)? gen.yield_ty {
+ if let Some(coroutine) = &$($mutability)? $body.coroutine {
+ if let Some(yield_ty) = $(& $mutability)? coroutine.yield_ty {
$self.visit_ty(
yield_ty,
TyContext::YieldTy(SourceInfo::outermost(span))
);
}
- if let Some(resume_ty) = $(& $mutability)? gen.resume_ty {
+ if let Some(resume_ty) = $(& $mutability)? coroutine.resume_ty {
$self.visit_ty(
resume_ty,
TyContext::ResumeTy(SourceInfo::outermost(span))
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index 33c27d41d864..817c7157b682 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -2280,10 +2280,6 @@ rustc_queries! {
desc { "whether the item should be made inlinable across crates" }
separate_provide_extern
}
-
- query find_field((def_id, ident): (DefId, rustc_span::symbol::Ident)) -> Option {
- desc { |tcx| "find the index of maybe nested field `{ident}` in `{}`", tcx.def_path_str(def_id) }
- }
}
rustc_query_append! { define_callbacks! }
diff --git a/compiler/rustc_middle/src/traits/solve.rs b/compiler/rustc_middle/src/traits/solve.rs
index 7bc4c60f1027..f659bf8125a0 100644
--- a/compiler/rustc_middle/src/traits/solve.rs
+++ b/compiler/rustc_middle/src/traits/solve.rs
@@ -8,10 +8,6 @@ use crate::ty::{
self, FallibleTypeFolder, TyCtxt, TypeFoldable, TypeFolder, TypeVisitable, TypeVisitor,
};
-mod cache;
-
-pub use cache::EvaluationCache;
-
pub type Goal<'tcx, P> = ir::solve::Goal<TyCtxt<'tcx>, P>;
pub type QueryInput<'tcx, P> = ir::solve::QueryInput<TyCtxt<'tcx>, P>;
pub type QueryResult<'tcx> = ir::solve::QueryResult<TyCtxt<'tcx>>;
diff --git a/compiler/rustc_middle/src/traits/solve/cache.rs b/compiler/rustc_middle/src/traits/solve/cache.rs
deleted file mode 100644
index 72a8d4eb4050..000000000000
--- a/compiler/rustc_middle/src/traits/solve/cache.rs
+++ /dev/null
@@ -1,121 +0,0 @@
-use super::{inspect, CanonicalInput, QueryResult};
-use crate::ty::TyCtxt;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_data_structures::sync::Lock;
-use rustc_query_system::cache::WithDepNode;
-use rustc_query_system::dep_graph::DepNodeIndex;
-use rustc_session::Limit;
-use rustc_type_ir::solve::CacheData;
-
-/// The trait solver cache used by `-Znext-solver`.
-///
-/// FIXME(@lcnr): link to some official documentation of how
-/// this works.
-#[derive(Default)]
-pub struct EvaluationCache<'tcx> {
- map: Lock<FxHashMap<CanonicalInput<'tcx>, CacheEntry<'tcx>>>,
-}
-
-impl<'tcx> rustc_type_ir::inherent::EvaluationCache<TyCtxt<'tcx>> for &'tcx EvaluationCache<'tcx> {
- /// Insert a final result into the global cache.
- fn insert(
- &self,
- tcx: TyCtxt<'tcx>,
- key: CanonicalInput<'tcx>,
- proof_tree: Option<&'tcx inspect::CanonicalGoalEvaluationStep<TyCtxt<'tcx>>>,
- additional_depth: usize,
- encountered_overflow: bool,
- cycle_participants: FxHashSet<CanonicalInput<'tcx>>,
- dep_node: DepNodeIndex,
- result: QueryResult<'tcx>,
- ) {
- let mut map = self.map.borrow_mut();
- let entry = map.entry(key).or_default();
- let data = WithDepNode::new(dep_node, QueryData { result, proof_tree });
- entry.cycle_participants.extend(cycle_participants);
- if encountered_overflow {
- entry.with_overflow.insert(additional_depth, data);
- } else {
- entry.success = Some(Success { data, additional_depth });
- }
-
- if cfg!(debug_assertions) {
- drop(map);
- let expected = CacheData { result, proof_tree, additional_depth, encountered_overflow };
- let actual = self.get(tcx, key, [], additional_depth);
- if !actual.as_ref().is_some_and(|actual| expected == *actual) {
- bug!("failed to lookup inserted element for {key:?}: {expected:?} != {actual:?}");
- }
- }
- }
-
- /// Try to fetch a cached result, checking the recursion limit
- /// and handling root goals of coinductive cycles.
- ///
- /// If this returns `Some` the cache result can be used.
- fn get(
- &self,
- tcx: TyCtxt<'tcx>,
- key: CanonicalInput<'tcx>,
- stack_entries: impl IntoIterator<Item = CanonicalInput<'tcx>>,
- available_depth: usize,
- ) -> Option<CacheData<TyCtxt<'tcx>>> {
- let map = self.map.borrow();
- let entry = map.get(&key)?;
-
- for stack_entry in stack_entries {
- if entry.cycle_participants.contains(&stack_entry) {
- return None;
- }
- }
-
- if let Some(ref success) = entry.success {
- if Limit(available_depth).value_within_limit(success.additional_depth) {
- let QueryData { result, proof_tree } = success.data.get(tcx);
- return Some(CacheData {
- result,
- proof_tree,
- additional_depth: success.additional_depth,
- encountered_overflow: false,
- });
- }
- }
-
- entry.with_overflow.get(&available_depth).map(|e| {
- let QueryData { result, proof_tree } = e.get(tcx);
- CacheData {
- result,
- proof_tree,
- additional_depth: available_depth,
- encountered_overflow: true,
- }
- })
- }
-}
-
-struct Success<'tcx> {
- data: WithDepNode<QueryData<'tcx>>,
- additional_depth: usize,
-}
-
-#[derive(Clone, Copy)]
-pub struct QueryData<'tcx> {
- pub result: QueryResult<'tcx>,
- pub proof_tree: Option<&'tcx inspect::CanonicalGoalEvaluationStep<TyCtxt<'tcx>>>,
-}
-
-/// The cache entry for a goal `CanonicalInput`.
-///
-/// This contains results whose computation never hit the
-/// recursion limit in `success`, and all results which hit
-/// the recursion limit in `with_overflow`.
-#[derive(Default)]
-struct CacheEntry<'tcx> {
- success: Option<Success<'tcx>>,
- /// We have to be careful when caching roots of cycles.
- ///
- /// See the doc comment of `StackEntry::cycle_participants` for more
- /// details.
- cycle_participants: FxHashSet<CanonicalInput<'tcx>>,
- with_overflow: FxHashMap<usize, WithDepNode<QueryData<'tcx>>>,
-}
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index aee42bfe3aac..25070e6b042c 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -59,6 +59,7 @@ use rustc_hir::lang_items::LangItem;
use rustc_hir::{HirId, Node, TraitCandidate};
use rustc_index::IndexVec;
use rustc_macros::{HashStable, TyDecodable, TyEncodable};
+use rustc_query_system::cache::WithDepNode;
use rustc_query_system::dep_graph::DepNodeIndex;
use rustc_query_system::ich::StableHashingContext;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
@@ -75,7 +76,7 @@ use rustc_type_ir::fold::TypeFoldable;
use rustc_type_ir::lang_items::TraitSolverLangItem;
use rustc_type_ir::solve::SolverMode;
use rustc_type_ir::TyKind::*;
-use rustc_type_ir::{CollectAndApply, Interner, TypeFlags, WithCachedTypeInfo};
+use rustc_type_ir::{search_graph, CollectAndApply, Interner, TypeFlags, WithCachedTypeInfo};
use tracing::{debug, instrument};
use std::assert_matches::assert_matches;
@@ -164,12 +165,26 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
type Clause = Clause<'tcx>;
type Clauses = ty::Clauses<'tcx>;
- type EvaluationCache = &'tcx solve::EvaluationCache<'tcx>;
+ type Tracked<T: fmt::Debug + Clone> = WithDepNode<T>;
+ fn mk_tracked<T: fmt::Debug + Clone>(
+ self,
+ data: T,
+ dep_node: DepNodeIndex,
+ ) -> Self::Tracked<T> {
+ WithDepNode::new(dep_node, data)
+ }
+ fn get_tracked<T: fmt::Debug + Clone>(self, tracked: &Self::Tracked<T>) -> T {
+ tracked.get(self)
+ }
- fn evaluation_cache(self, mode: SolverMode) -> &'tcx solve::EvaluationCache<'tcx> {
+ fn with_global_cache<R>(
+ self,
+ mode: SolverMode,
+ f: impl FnOnce(&mut search_graph::GlobalCache<Self>) -> R,
+ ) -> R {
match mode {
- SolverMode::Normal => &self.new_solver_evaluation_cache,
- SolverMode::Coherence => &self.new_solver_coherence_evaluation_cache,
+ SolverMode::Normal => f(&mut *self.new_solver_evaluation_cache.lock()),
+ SolverMode::Coherence => f(&mut *self.new_solver_coherence_evaluation_cache.lock()),
}
}
@@ -1283,8 +1298,8 @@ pub struct GlobalCtxt<'tcx> {
pub evaluation_cache: traits::EvaluationCache<'tcx>,
/// Caches the results of goal evaluation in the new solver.
- pub new_solver_evaluation_cache: solve::EvaluationCache<'tcx>,
- pub new_solver_coherence_evaluation_cache: solve::EvaluationCache<'tcx>,
+ pub new_solver_evaluation_cache: Lock<search_graph::GlobalCache<TyCtxt<'tcx>>>,
+ pub new_solver_coherence_evaluation_cache: Lock<search_graph::GlobalCache<TyCtxt<'tcx>>>,
pub canonical_param_env_cache: CanonicalParamEnvCache<'tcx>,
@@ -1427,11 +1442,12 @@ impl<'tcx> TyCtxt<'tcx> {
}
/// Allocates a read-only byte or string literal for `mir::interpret`.
- pub fn allocate_bytes(self, bytes: &[u8]) -> interpret::AllocId {
+ /// Returns the same `AllocId` if called again with the same bytes.
+ pub fn allocate_bytes_dedup(self, bytes: &[u8]) -> interpret::AllocId {
// Create an allocation that just contains these bytes.
let alloc = interpret::Allocation::from_bytes_byte_aligned_immutable(bytes);
let alloc = self.mk_const_alloc(alloc);
- self.reserve_and_set_memory_alloc(alloc)
+ self.reserve_and_set_memory_dedup(alloc)
}
/// Returns a range of the start/end indices specified with the
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
index c50a98e88fd6..6e64e9bc4f87 100644
--- a/compiler/rustc_middle/src/ty/instance.rs
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -541,7 +541,9 @@ impl<'tcx> Instance<'tcx> {
// which means that rustc basically hangs.
//
// Bail out in these cases to avoid that bad user experience.
- if !tcx.type_length_limit().value_within_limit(type_length(args)) {
+ if tcx.sess.opts.unstable_opts.enforce_type_length_limit
+ && !tcx.type_length_limit().value_within_limit(type_length(args))
+ {
return Ok(None);
}
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index eb25aecd9cef..22a6786665ca 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1212,7 +1212,6 @@ pub fn fn_can_unwind(tcx: TyCtxt<'_>, fn_def_id: Option, abi: SpecAbi) ->
| RiscvInterruptM
| RiscvInterruptS
| CCmseNonSecureCall
- | Wasm
| Unadjusted => false,
Rust | RustCall | RustCold | RustIntrinsic => {
tcx.sess.panic_strategy() == PanicStrategy::Unwind
diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
index 3b69058d3cb4..be62a3d37365 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_constant.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
@@ -140,7 +140,7 @@ fn lit_to_mir_constant<'tcx>(
ConstValue::Slice { data: allocation, meta: allocation.inner().size().bytes() }
}
(ast::LitKind::ByteStr(data, _), ty::Ref(_, inner_ty, _)) if inner_ty.is_array() => {
- let id = tcx.allocate_bytes(data);
+ let id = tcx.allocate_bytes_dedup(data);
ConstValue::Scalar(Scalar::from_pointer(id.into(), &tcx))
}
(ast::LitKind::CStr(data, _), ty::Ref(_, inner_ty, _)) if matches!(inner_ty.kind(), ty::Adt(def, _) if tcx.is_lang_item(def.did(), LangItem::CStr)) =>
diff --git a/compiler/rustc_mir_build/src/build/matches/match_pair.rs b/compiler/rustc_mir_build/src/build/matches/match_pair.rs
new file mode 100644
index 000000000000..2f540478674d
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/matches/match_pair.rs
@@ -0,0 +1,254 @@
+use rustc_middle::mir::*;
+use rustc_middle::thir::{self, *};
+use rustc_middle::ty::{self, Ty, TypeVisitableExt};
+
+use crate::build::expr::as_place::{PlaceBase, PlaceBuilder};
+use crate::build::matches::{FlatPat, MatchPair, TestCase};
+use crate::build::Builder;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Builds and returns [`MatchPair`] trees, one for each pattern in
+ /// `subpatterns`, representing the fields of a [`PatKind::Variant`] or
+ /// [`PatKind::Leaf`].
+ ///
+ /// Used internally by [`MatchPair::new`].
+ fn field_match_pairs<'pat>(
+ &mut self,
+ place: PlaceBuilder<'tcx>,
+ subpatterns: &'pat [FieldPat<'tcx>],
+ ) -> Vec<MatchPair<'pat, 'tcx>> {
+ subpatterns
+ .iter()
+ .map(|fieldpat| {
+ let place =
+ place.clone_project(PlaceElem::Field(fieldpat.field, fieldpat.pattern.ty));
+ MatchPair::new(place, &fieldpat.pattern, self)
+ })
+ .collect()
+ }
+
+ /// Builds [`MatchPair`] trees for the prefix/middle/suffix parts of an
+ /// array pattern or slice pattern, and adds those trees to `match_pairs`.
+ ///
+ /// Used internally by [`MatchPair::new`].
+ fn prefix_slice_suffix<'pat>(
+ &mut self,
+ match_pairs: &mut Vec<MatchPair<'pat, 'tcx>>,
+ place: &PlaceBuilder<'tcx>,
+ prefix: &'pat [Box<Pat<'tcx>>],
+ opt_slice: &'pat Option<Box<Pat<'tcx>>>,
+ suffix: &'pat [Box<Pat<'tcx>>],
+ ) {
+ let tcx = self.tcx;
+ let (min_length, exact_size) = if let Some(place_resolved) = place.try_to_place(self) {
+ match place_resolved.ty(&self.local_decls, tcx).ty.kind() {
+ ty::Array(_, length) => (length.eval_target_usize(tcx, self.param_env), true),
+ _ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
+ }
+ } else {
+ ((prefix.len() + suffix.len()).try_into().unwrap(), false)
+ };
+
+ match_pairs.extend(prefix.iter().enumerate().map(|(idx, subpattern)| {
+ let elem =
+ ProjectionElem::ConstantIndex { offset: idx as u64, min_length, from_end: false };
+ MatchPair::new(place.clone_project(elem), subpattern, self)
+ }));
+
+ if let Some(subslice_pat) = opt_slice {
+ let suffix_len = suffix.len() as u64;
+ let subslice = place.clone_project(PlaceElem::Subslice {
+ from: prefix.len() as u64,
+ to: if exact_size { min_length - suffix_len } else { suffix_len },
+ from_end: !exact_size,
+ });
+ match_pairs.push(MatchPair::new(subslice, subslice_pat, self));
+ }
+
+ match_pairs.extend(suffix.iter().rev().enumerate().map(|(idx, subpattern)| {
+ let end_offset = (idx + 1) as u64;
+ let elem = ProjectionElem::ConstantIndex {
+ offset: if exact_size { min_length - end_offset } else { end_offset },
+ min_length,
+ from_end: !exact_size,
+ };
+ let place = place.clone_project(elem);
+ MatchPair::new(place, subpattern, self)
+ }));
+ }
+}
+
+impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
+ /// Recursively builds a `MatchPair` tree for the given pattern and its
+ /// subpatterns.
+ pub(in crate::build) fn new(
+ mut place_builder: PlaceBuilder<'tcx>,
+ pattern: &'pat Pat<'tcx>,
+ cx: &mut Builder<'_, 'tcx>,
+ ) -> MatchPair<'pat, 'tcx> {
+ // Force the place type to the pattern's type.
+ // FIXME(oli-obk): can we use this to simplify slice/array pattern hacks?
+ if let Some(resolved) = place_builder.resolve_upvar(cx) {
+ place_builder = resolved;
+ }
+
+ // Only add the OpaqueCast projection if the given place is an opaque type and the
+ // expected type from the pattern is not.
+ let may_need_cast = match place_builder.base() {
+ PlaceBase::Local(local) => {
+ let ty =
+ Place::ty_from(local, place_builder.projection(), &cx.local_decls, cx.tcx).ty;
+ ty != pattern.ty && ty.has_opaque_types()
+ }
+ _ => true,
+ };
+ if may_need_cast {
+ place_builder = place_builder.project(ProjectionElem::OpaqueCast(pattern.ty));
+ }
+
+ let place = place_builder.try_to_place(cx);
+ let default_irrefutable = || TestCase::Irrefutable { binding: None, ascription: None };
+ let mut subpairs = Vec::new();
+ let test_case = match pattern.kind {
+ PatKind::Wild | PatKind::Error(_) => default_irrefutable(),
+
+ PatKind::Or { ref pats } => TestCase::Or {
+ pats: pats.iter().map(|pat| FlatPat::new(place_builder.clone(), pat, cx)).collect(),
+ },
+
+ PatKind::Range(ref range) => {
+ if range.is_full_range(cx.tcx) == Some(true) {
+ default_irrefutable()
+ } else {
+ TestCase::Range(range)
+ }
+ }
+
+ PatKind::Constant { value } => TestCase::Constant { value },
+
+ PatKind::AscribeUserType {
+ ascription: thir::Ascription { ref annotation, variance },
+ ref subpattern,
+ ..
+ } => {
+ // Apply the type ascription to the value at `match_pair.place`
+ let ascription = place.map(|source| super::Ascription {
+ annotation: annotation.clone(),
+ source,
+ variance,
+ });
+
+ subpairs.push(MatchPair::new(place_builder, subpattern, cx));
+ TestCase::Irrefutable { ascription, binding: None }
+ }
+
+ PatKind::Binding { mode, var, ref subpattern, .. } => {
+ let binding = place.map(|source| super::Binding {
+ span: pattern.span,
+ source,
+ var_id: var,
+ binding_mode: mode,
+ });
+
+ if let Some(subpattern) = subpattern.as_ref() {
+ // this is the `x @ P` case; have to keep matching against `P` now
+ subpairs.push(MatchPair::new(place_builder, subpattern, cx));
+ }
+ TestCase::Irrefutable { ascription: None, binding }
+ }
+
+ PatKind::InlineConstant { subpattern: ref pattern, def, .. } => {
+ // Apply a type ascription for the inline constant to the value at `match_pair.place`
+ let ascription = place.map(|source| {
+ let span = pattern.span;
+ let parent_id = cx.tcx.typeck_root_def_id(cx.def_id.to_def_id());
+ let args = ty::InlineConstArgs::new(
+ cx.tcx,
+ ty::InlineConstArgsParts {
+ parent_args: ty::GenericArgs::identity_for_item(cx.tcx, parent_id),
+ ty: cx.infcx.next_ty_var(span),
+ },
+ )
+ .args;
+ let user_ty = cx.infcx.canonicalize_user_type_annotation(ty::UserType::TypeOf(
+ def.to_def_id(),
+ ty::UserArgs { args, user_self_ty: None },
+ ));
+ let annotation = ty::CanonicalUserTypeAnnotation {
+ inferred_ty: pattern.ty,
+ span,
+ user_ty: Box::new(user_ty),
+ };
+ super::Ascription { annotation, source, variance: ty::Contravariant }
+ });
+
+ subpairs.push(MatchPair::new(place_builder, pattern, cx));
+ TestCase::Irrefutable { ascription, binding: None }
+ }
+
+ PatKind::Array { ref prefix, ref slice, ref suffix } => {
+ cx.prefix_slice_suffix(&mut subpairs, &place_builder, prefix, slice, suffix);
+ default_irrefutable()
+ }
+ PatKind::Slice { ref prefix, ref slice, ref suffix } => {
+ cx.prefix_slice_suffix(&mut subpairs, &place_builder, prefix, slice, suffix);
+
+ if prefix.is_empty() && slice.is_some() && suffix.is_empty() {
+ default_irrefutable()
+ } else {
+ TestCase::Slice {
+ len: prefix.len() + suffix.len(),
+ variable_length: slice.is_some(),
+ }
+ }
+ }
+
+ PatKind::Variant { adt_def, variant_index, args, ref subpatterns } => {
+ let downcast_place = place_builder.downcast(adt_def, variant_index); // `(x as Variant)`
+ subpairs = cx.field_match_pairs(downcast_place, subpatterns);
+
+ let irrefutable = adt_def.variants().iter_enumerated().all(|(i, v)| {
+ i == variant_index || {
+ (cx.tcx.features().exhaustive_patterns
+ || cx.tcx.features().min_exhaustive_patterns)
+ && !v
+ .inhabited_predicate(cx.tcx, adt_def)
+ .instantiate(cx.tcx, args)
+ .apply_ignore_module(cx.tcx, cx.param_env)
+ }
+ }) && (adt_def.did().is_local()
+ || !adt_def.is_variant_list_non_exhaustive());
+ if irrefutable {
+ default_irrefutable()
+ } else {
+ TestCase::Variant { adt_def, variant_index }
+ }
+ }
+
+ PatKind::Leaf { ref subpatterns } => {
+ subpairs = cx.field_match_pairs(place_builder, subpatterns);
+ default_irrefutable()
+ }
+
+ PatKind::Deref { ref subpattern } => {
+ subpairs.push(MatchPair::new(place_builder.deref(), subpattern, cx));
+ default_irrefutable()
+ }
+
+ PatKind::DerefPattern { ref subpattern, mutability } => {
+ // Create a new temporary for each deref pattern.
+ // FIXME(deref_patterns): dedup temporaries to avoid multiple `deref()` calls?
+ let temp = cx.temp(
+ Ty::new_ref(cx.tcx, cx.tcx.lifetimes.re_erased, subpattern.ty, mutability),
+ pattern.span,
+ );
+ subpairs.push(MatchPair::new(PlaceBuilder::from(temp).deref(), subpattern, cx));
+ TestCase::Deref { temp, mutability }
+ }
+
+ PatKind::Never => TestCase::Never,
+ };
+
+ MatchPair { place, test_case, subpairs, pattern }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
index 5695c881ecc2..98de4df3ce39 100644
--- a/compiler/rustc_mir_build/src/build/matches/mod.rs
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -24,6 +24,7 @@ use tracing::{debug, instrument};
use util::visit_bindings;
// helper functions, broken out by category:
+mod match_pair;
mod simplify;
mod test;
mod util;
@@ -1119,6 +1120,11 @@ impl<'tcx, 'pat> Candidate<'pat, 'tcx> {
}
}
+ /// Returns whether the first match pair of this candidate is an or-pattern.
+ fn starts_with_or_pattern(&self) -> bool {
+ matches!(&*self.match_pairs, [MatchPair { test_case: TestCase::Or { .. }, .. }, ..])
+ }
+
/// Visit the leaf candidates (those with no subcandidates) contained in
/// this candidate.
fn visit_leaves<'a>(&'a mut self, mut visit_leaf: impl FnMut(&'a mut Self)) {
@@ -1190,17 +1196,27 @@ impl<'pat, 'tcx> TestCase<'pat, 'tcx> {
}
}
+/// Node in a tree of "match pairs", where each pair consists of a place to be
+/// tested, and a test to perform on that place.
+///
+/// Each node also has a list of subpairs (possibly empty) that must also match,
+/// and a reference to the THIR pattern it represents.
#[derive(Debug, Clone)]
pub(crate) struct MatchPair<'pat, 'tcx> {
/// This place...
- // This can be `None` if it referred to a non-captured place in a closure.
- // Invariant: place.is_none() => test_case is Irrefutable
- // In other words this must be `Some(_)` after simplification.
+ ///
+ /// ---
+ /// This can be `None` if it referred to a non-captured place in a closure.
+ ///
+ /// Invariant: Can only be `None` when `test_case` is `Irrefutable`.
+ /// Therefore this must be `Some(_)` after simplification.
place: Option>,
/// ... must pass this test...
- // Invariant: after creation and simplification in `Candidate::new()`, this must not be
- // `Irrefutable`.
+ ///
+ /// ---
+ /// Invariant: after creation and simplification in [`FlatPat::new`],
+ /// this must not be [`TestCase::Irrefutable`].
test_case: TestCase<'pat, 'tcx>,
/// ... and these subpairs must match.
@@ -1308,11 +1324,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
candidates: &mut [&mut Candidate<'pat, 'tcx>],
refutable: bool,
) -> BasicBlock {
+ // This will generate code to test scrutinee_place and branch to the appropriate arm block.
// See the doc comment on `match_candidates` for why we have an otherwise block.
- let otherwise_block = self.cfg.start_new_block();
-
- // This will generate code to test scrutinee_place and branch to the appropriate arm block
- self.match_candidates(match_start_span, scrutinee_span, block, otherwise_block, candidates);
+ let otherwise_block =
+ self.match_candidates(match_start_span, scrutinee_span, block, candidates);
// Link each leaf candidate to the `false_edge_start_block` of the next one.
let mut previous_candidate: Option<&mut Candidate<'_, '_>> = None;
@@ -1363,27 +1378,24 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
otherwise_block
}
- /// The main match algorithm. It begins with a set of candidates
- /// `candidates` and has the job of generating code to determine
- /// which of these candidates, if any, is the correct one. The
+ /// The main match algorithm. It begins with a set of candidates `candidates` and has the job of
+ /// generating code that branches to an appropriate block if the scrutinee matches one of these
+ /// candidates. The
/// candidates are sorted such that the first item in the list
/// has the highest priority. When a candidate is found to match
/// the value, we will set and generate a branch to the appropriate
/// pre-binding block.
///
- /// If we find that *NONE* of the candidates apply, we branch to `otherwise_block`.
+ /// If none of the candidates apply, we continue to the returned `otherwise_block`.
///
/// It might be surprising that the input can be non-exhaustive.
- /// Indeed, initially, it is not, because all matches are
+ /// Indeed, for matches, initially, it is not, because all matches are
/// exhaustive in Rust. But during processing we sometimes divide
/// up the list of candidates and recurse with a non-exhaustive
/// list. This is how our lowering approach (called "backtracking
/// automaton" in the literature) works.
/// See [`Builder::test_candidates`] for more details.
///
- /// If `fake_borrows` is `Some`, then places which need fake borrows
- /// will be added to it.
- ///
/// For an example of how we use `otherwise_block`, consider:
/// ```
/// # fn foo((x, y): (bool, bool)) -> u32 {
@@ -1408,7 +1420,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// }
/// if y {
/// if x {
- /// // This is actually unreachable because the `(true, true)` case was handled above.
+ /// // This is actually unreachable because the `(true, true)` case was handled above,
+ /// // but we don't know that from within the lowering algorithm.
/// // continue
/// } else {
/// return 3
@@ -1425,161 +1438,61 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// the algorithm. For more details on why we lower like this, see [`Builder::test_candidates`].
///
/// Note how we test `x` twice. This is the tradeoff of backtracking automata: we prefer smaller
- /// code size at the expense of non-optimal code paths.
+ /// code size so we accept non-optimal code paths.
#[instrument(skip(self), level = "debug")]
- fn match_candidates<'pat>(
+ fn match_candidates(
&mut self,
span: Span,
scrutinee_span: Span,
start_block: BasicBlock,
- otherwise_block: BasicBlock,
- candidates: &mut [&mut Candidate<'pat, 'tcx>],
- ) {
- // We process or-patterns here. If any candidate starts with an or-pattern, we have to
- // expand the or-pattern before we can proceed further.
- //
- // We can't expand them freely however. The rule is: if the candidate has an or-pattern as
- // its only remaining match pair, we can expand it freely. If it has other match pairs, we
- // can expand it but we can't process more candidates after it.
- //
- // If we didn't stop, the `otherwise` cases could get mixed up. E.g. in the following,
- // or-pattern simplification (in `merge_trivial_subcandidates`) makes it so the `1` and `2`
- // cases branch to a same block (which then tests `false`). If we took `(2, _)` in the same
- // set of candidates, when we reach the block that tests `false` we don't know whether we
- // came from `1` or `2`, hence we can't know where to branch on failure.
- // ```ignore(illustrative)
- // match (1, true) {
- // (1 | 2, false) => {},
- // (2, _) => {},
- // _ => {}
- // }
- // ```
- //
- // We therefore split the `candidates` slice in two, expand or-patterns in the first half,
- // and process both halves separately.
- let mut expand_until = 0;
- for (i, candidate) in candidates.iter().enumerate() {
- if matches!(
- &*candidate.match_pairs,
- [MatchPair { test_case: TestCase::Or { .. }, .. }, ..]
- ) {
- expand_until = i + 1;
- if candidate.match_pairs.len() > 1 {
- break;
- }
- }
- if expand_until != 0 {
- expand_until = i + 1;
- }
- }
- let (candidates_to_expand, remaining_candidates) = candidates.split_at_mut(expand_until);
-
+ candidates: &mut [&mut Candidate<'_, 'tcx>],
+ ) -> BasicBlock {
ensure_sufficient_stack(|| {
- if candidates_to_expand.is_empty() {
- // No candidates start with an or-pattern, we can continue.
- self.match_expanded_candidates(
- span,
- scrutinee_span,
- start_block,
- otherwise_block,
- remaining_candidates,
- );
- } else {
- // Expand one level of or-patterns for each candidate in `candidates_to_expand`.
- let mut expanded_candidates = Vec::new();
- for candidate in candidates_to_expand.iter_mut() {
- if let [MatchPair { test_case: TestCase::Or { .. }, .. }, ..] =
- &*candidate.match_pairs
- {
- let or_match_pair = candidate.match_pairs.remove(0);
- // Expand the or-pattern into subcandidates.
- self.create_or_subcandidates(candidate, or_match_pair);
- // Collect the newly created subcandidates.
- for subcandidate in candidate.subcandidates.iter_mut() {
- expanded_candidates.push(subcandidate);
- }
- } else {
- expanded_candidates.push(candidate);
- }
- }
-
- // Process the expanded candidates.
- let remainder_start = self.cfg.start_new_block();
- // There might be new or-patterns obtained from expanding the old ones, so we call
- // `match_candidates` again.
- self.match_candidates(
- span,
- scrutinee_span,
- start_block,
- remainder_start,
- expanded_candidates.as_mut_slice(),
- );
-
- // Simplify subcandidates and process any leftover match pairs.
- for candidate in candidates_to_expand {
- if !candidate.subcandidates.is_empty() {
- self.finalize_or_candidate(span, scrutinee_span, candidate);
- }
- }
-
- // Process the remaining candidates.
- self.match_candidates(
- span,
- scrutinee_span,
- remainder_start,
- otherwise_block,
- remaining_candidates,
- );
- }
- });
+ self.match_candidates_inner(span, scrutinee_span, start_block, candidates)
+ })
}
- /// Construct the decision tree for `candidates`. Caller must ensure that no candidate in
- /// `candidates` starts with an or-pattern.
- fn match_expanded_candidates(
+ /// Construct the decision tree for `candidates`. Don't call this, call `match_candidates`
+ /// instead to reserve sufficient stack space.
+ fn match_candidates_inner(
&mut self,
span: Span,
scrutinee_span: Span,
mut start_block: BasicBlock,
- otherwise_block: BasicBlock,
candidates: &mut [&mut Candidate<'_, 'tcx>],
- ) {
+ ) -> BasicBlock {
if let [first, ..] = candidates {
if first.false_edge_start_block.is_none() {
first.false_edge_start_block = Some(start_block);
}
}
- match candidates {
+ // Process a prefix of the candidates.
+ let rest = match candidates {
[] => {
- // If there are no candidates that still need testing, we're done. Since all matches are
- // exhaustive, execution should never reach this point.
- let source_info = self.source_info(span);
- self.cfg.goto(start_block, source_info, otherwise_block);
+ // If there are no candidates that still need testing, we're done.
+ return start_block;
}
[first, remaining @ ..] if first.match_pairs.is_empty() => {
// The first candidate has satisfied all its match pairs; we link it up and continue
// with the remaining candidates.
- start_block = self.select_matched_candidate(first, start_block);
- self.match_expanded_candidates(
- span,
- scrutinee_span,
- start_block,
- otherwise_block,
- remaining,
- )
+ let remainder_start = self.select_matched_candidate(first, start_block);
+ remainder_start.and(remaining)
+ }
+ candidates if candidates.iter().any(|candidate| candidate.starts_with_or_pattern()) => {
+ // If any candidate starts with an or-pattern, we have to expand the or-pattern before we
+ // can proceed further.
+ self.expand_and_match_or_candidates(span, scrutinee_span, start_block, candidates)
}
candidates => {
// The first candidate has some unsatisfied match pairs; we proceed to do more tests.
- self.test_candidates(
- span,
- scrutinee_span,
- candidates,
- start_block,
- otherwise_block,
- );
+ self.test_candidates(span, scrutinee_span, candidates, start_block)
}
- }
+ };
+
+ // Process any candidates that remain.
+ let remaining_candidates = unpack!(start_block = rest);
+ self.match_candidates(span, scrutinee_span, start_block, remaining_candidates)
}
/// Link up matched candidates.
@@ -1624,6 +1537,102 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
otherwise_block
}
+ /// Takes a list of candidates such that some of the candidates' first match pairs are
+ /// or-patterns. This expands as many or-patterns as possible and processes the resulting
+ /// candidates. Returns the unprocessed candidates if any.
+ fn expand_and_match_or_candidates<'pat, 'b, 'c>(
+ &mut self,
+ span: Span,
+ scrutinee_span: Span,
+ start_block: BasicBlock,
+ candidates: &'b mut [&'c mut Candidate<'pat, 'tcx>],
+ ) -> BlockAnd<&'b mut [&'c mut Candidate<'pat, 'tcx>]> {
+ // We can't expand or-patterns freely. The rule is: if the candidate has an
+ // or-pattern as its only remaining match pair, we can expand it freely. If it has
+ // other match pairs, we can expand it but we can't process more candidates after
+ // it.
+ //
+ // If we didn't stop, the `otherwise` cases could get mixed up. E.g. in the
+ // following, or-pattern simplification (in `merge_trivial_subcandidates`) makes it
+ // so the `1` and `2` cases branch to a same block (which then tests `false`). If we
+ // took `(2, _)` in the same set of candidates, when we reach the block that tests
+ // `false` we don't know whether we came from `1` or `2`, hence we can't know where
+ // to branch on failure.
+ //
+ // ```ignore(illustrative)
+ // match (1, true) {
+ // (1 | 2, false) => {},
+ // (2, _) => {},
+ // _ => {}
+ // }
+ // ```
+ //
+ // We therefore split the `candidates` slice in two, expand or-patterns in the first half,
+ // and process the rest separately.
+ let mut expand_until = 0;
+ for (i, candidate) in candidates.iter().enumerate() {
+ expand_until = i + 1;
+ if candidate.match_pairs.len() > 1 && candidate.starts_with_or_pattern() {
+ // The candidate has an or-pattern as well as more match pairs: we must
+ // split the candidates list here.
+ break;
+ }
+ }
+ let (candidates_to_expand, remaining_candidates) = candidates.split_at_mut(expand_until);
+
+ // Expand one level of or-patterns for each candidate in `candidates_to_expand`.
+ let mut expanded_candidates = Vec::new();
+ for candidate in candidates_to_expand.iter_mut() {
+ if candidate.starts_with_or_pattern() {
+ let or_match_pair = candidate.match_pairs.remove(0);
+ // Expand the or-pattern into subcandidates.
+ self.create_or_subcandidates(candidate, or_match_pair);
+ // Collect the newly created subcandidates.
+ for subcandidate in candidate.subcandidates.iter_mut() {
+ expanded_candidates.push(subcandidate);
+ }
+ } else {
+ expanded_candidates.push(candidate);
+ }
+ }
+
+ // Process the expanded candidates.
+ let remainder_start = self.match_candidates(
+ span,
+ scrutinee_span,
+ start_block,
+ expanded_candidates.as_mut_slice(),
+ );
+
+ // Simplify subcandidates and process any leftover match pairs.
+ for candidate in candidates_to_expand {
+ if !candidate.subcandidates.is_empty() {
+ self.finalize_or_candidate(span, scrutinee_span, candidate);
+ }
+ }
+
+ remainder_start.and(remaining_candidates)
+ }
+
+ /// Given a match-pair that corresponds to an or-pattern, expand each subpattern into a new
+ /// subcandidate. Any candidate that has been expanded that way should be passed to
+ /// `finalize_or_candidate` after its subcandidates have been processed.
+ fn create_or_subcandidates<'pat>(
+ &mut self,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ match_pair: MatchPair<'pat, 'tcx>,
+ ) {
+ let TestCase::Or { pats } = match_pair.test_case else { bug!() };
+ debug!("expanding or-pattern: candidate={:#?}\npats={:#?}", candidate, pats);
+ candidate.or_span = Some(match_pair.pattern.span);
+ candidate.subcandidates = pats
+ .into_vec()
+ .into_iter()
+ .map(|flat_pat| Candidate::from_flat_pat(flat_pat, candidate.has_guard))
+ .collect();
+ candidate.subcandidates[0].false_edge_start_block = candidate.false_edge_start_block;
+ }
+
/// Simplify subcandidates and process any leftover match pairs. The candidate should have been
/// expanded with `create_or_subcandidates`.
///
@@ -1690,6 +1699,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
self.merge_trivial_subcandidates(candidate);
if !candidate.match_pairs.is_empty() {
+ let or_span = candidate.or_span.unwrap_or(candidate.extra_data.span);
+ let source_info = self.source_info(or_span);
// If more match pairs remain, test them after each subcandidate.
// We could add them to the or-candidates before the call to `test_or_pattern` but this
// would make it impossible to detect simplifiable or-patterns. That would guarantee
@@ -1703,6 +1714,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
assert!(leaf_candidate.match_pairs.is_empty());
leaf_candidate.match_pairs.extend(remaining_match_pairs.iter().cloned());
let or_start = leaf_candidate.pre_binding_block.unwrap();
+ let otherwise =
+ self.match_candidates(span, scrutinee_span, or_start, &mut [leaf_candidate]);
// In a case like `(P | Q, R | S)`, if `P` succeeds and `R | S` fails, we know `(Q,
// R | S)` will fail too. If there is no guard, we skip testing of `Q` by branching
// directly to `last_otherwise`. If there is a guard,
@@ -1713,36 +1726,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
} else {
last_otherwise.unwrap()
};
- self.match_candidates(
- span,
- scrutinee_span,
- or_start,
- or_otherwise,
- &mut [leaf_candidate],
- );
+ self.cfg.goto(otherwise, source_info, or_otherwise);
});
}
}
- /// Given a match-pair that corresponds to an or-pattern, expand each subpattern into a new
- /// subcandidate. Any candidate that has been expanded that way should be passed to
- /// `finalize_or_candidate` after its subcandidates have been processed.
- fn create_or_subcandidates<'pat>(
- &mut self,
- candidate: &mut Candidate<'pat, 'tcx>,
- match_pair: MatchPair<'pat, 'tcx>,
- ) {
- let TestCase::Or { pats } = match_pair.test_case else { bug!() };
- debug!("expanding or-pattern: candidate={:#?}\npats={:#?}", candidate, pats);
- candidate.or_span = Some(match_pair.pattern.span);
- candidate.subcandidates = pats
- .into_vec()
- .into_iter()
- .map(|flat_pat| Candidate::from_flat_pat(flat_pat, candidate.has_guard))
- .collect();
- candidate.subcandidates[0].false_edge_start_block = candidate.false_edge_start_block;
- }
-
/// Try to merge all of the subcandidates of the given candidate into one. This avoids
/// exponentially large CFGs in cases like `(1 | 2, 3 | 4, ...)`. The candidate should have been
/// expanded with `create_or_subcandidates`.
@@ -1992,14 +1980,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// }
/// # }
/// ```
+ ///
+ /// We return the unprocessed candidates.
fn test_candidates<'pat, 'b, 'c>(
&mut self,
span: Span,
scrutinee_span: Span,
candidates: &'b mut [&'c mut Candidate<'pat, 'tcx>],
start_block: BasicBlock,
- otherwise_block: BasicBlock,
- ) {
+ ) -> BlockAnd<&'b mut [&'c mut Candidate<'pat, 'tcx>]> {
// Extract the match-pair from the highest priority candidate and build a test from it.
let (match_place, test) = self.pick_test(candidates);
@@ -2010,33 +1999,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// The block that we should branch to if none of the
// `target_candidates` match.
- let remainder_start = if !remaining_candidates.is_empty() {
- let remainder_start = self.cfg.start_new_block();
- self.match_candidates(
- span,
- scrutinee_span,
- remainder_start,
- otherwise_block,
- remaining_candidates,
- );
- remainder_start
- } else {
- otherwise_block
- };
+ let remainder_start = self.cfg.start_new_block();
// For each outcome of test, process the candidates that still apply.
let target_blocks: FxIndexMap<_, _> = target_candidates
.into_iter()
.map(|(branch, mut candidates)| {
- let candidate_start = self.cfg.start_new_block();
- self.match_candidates(
- span,
- scrutinee_span,
- candidate_start,
- remainder_start,
- &mut *candidates,
- );
- (branch, candidate_start)
+ let branch_start = self.cfg.start_new_block();
+ let branch_otherwise =
+ self.match_candidates(span, scrutinee_span, branch_start, &mut *candidates);
+ let source_info = self.source_info(span);
+ self.cfg.goto(branch_otherwise, source_info, remainder_start);
+ (branch, branch_start)
})
.collect();
@@ -2050,6 +2024,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&test,
target_blocks,
);
+
+ remainder_start.and(remaining_candidates)
}
}
diff --git a/compiler/rustc_mir_build/src/build/matches/util.rs b/compiler/rustc_mir_build/src/build/matches/util.rs
index 3bec154e1df5..e67fc843285e 100644
--- a/compiler/rustc_mir_build/src/build/matches/util.rs
+++ b/compiler/rustc_mir_build/src/build/matches/util.rs
@@ -1,78 +1,15 @@
use std::marker::PhantomData;
-use crate::build::expr::as_place::{PlaceBase, PlaceBuilder};
+use crate::build::expr::as_place::PlaceBase;
use crate::build::matches::{Binding, Candidate, FlatPat, MatchPair, TestCase};
use crate::build::Builder;
use rustc_data_structures::fx::FxIndexMap;
use rustc_middle::mir::*;
-use rustc_middle::thir::{self, *};
-use rustc_middle::ty::TypeVisitableExt;
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::Ty;
use rustc_span::Span;
use tracing::debug;
impl<'a, 'tcx> Builder<'a, 'tcx> {
- pub(crate) fn field_match_pairs<'pat>(
- &mut self,
- place: PlaceBuilder<'tcx>,
- subpatterns: &'pat [FieldPat<'tcx>],
- ) -> Vec<MatchPair<'pat, 'tcx>> {
- subpatterns
- .iter()
- .map(|fieldpat| {
- let place =
- place.clone_project(PlaceElem::Field(fieldpat.field, fieldpat.pattern.ty));
- MatchPair::new(place, &fieldpat.pattern, self)
- })
- .collect()
- }
-
- pub(crate) fn prefix_slice_suffix<'pat>(
- &mut self,
- match_pairs: &mut Vec<MatchPair<'pat, 'tcx>>,
- place: &PlaceBuilder<'tcx>,
- prefix: &'pat [Box<Pat<'tcx>>],
- opt_slice: &'pat Option<Box<Pat<'tcx>>>,
- suffix: &'pat [Box<Pat<'tcx>>],
- ) {
- let tcx = self.tcx;
- let (min_length, exact_size) = if let Some(place_resolved) = place.try_to_place(self) {
- match place_resolved.ty(&self.local_decls, tcx).ty.kind() {
- ty::Array(_, length) => (length.eval_target_usize(tcx, self.param_env), true),
- _ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
- }
- } else {
- ((prefix.len() + suffix.len()).try_into().unwrap(), false)
- };
-
- match_pairs.extend(prefix.iter().enumerate().map(|(idx, subpattern)| {
- let elem =
- ProjectionElem::ConstantIndex { offset: idx as u64, min_length, from_end: false };
- MatchPair::new(place.clone_project(elem), subpattern, self)
- }));
-
- if let Some(subslice_pat) = opt_slice {
- let suffix_len = suffix.len() as u64;
- let subslice = place.clone_project(PlaceElem::Subslice {
- from: prefix.len() as u64,
- to: if exact_size { min_length - suffix_len } else { suffix_len },
- from_end: !exact_size,
- });
- match_pairs.push(MatchPair::new(subslice, subslice_pat, self));
- }
-
- match_pairs.extend(suffix.iter().rev().enumerate().map(|(idx, subpattern)| {
- let end_offset = (idx + 1) as u64;
- let elem = ProjectionElem::ConstantIndex {
- offset: if exact_size { min_length - end_offset } else { end_offset },
- min_length,
- from_end: !exact_size,
- };
- let place = place.clone_project(elem);
- MatchPair::new(place, subpattern, self)
- }));
- }
-
/// Creates a false edge to `imaginary_target` and a real edge to
/// real_target. If `imaginary_target` is none, or is the same as the real
/// target, a Goto is generated instead to simplify the generated MIR.
@@ -96,181 +33,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
-impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
- /// Recursively builds a `MatchPair` tree for the given pattern and its
- /// subpatterns.
- pub(in crate::build) fn new(
- mut place_builder: PlaceBuilder<'tcx>,
- pattern: &'pat Pat<'tcx>,
- cx: &mut Builder<'_, 'tcx>,
- ) -> MatchPair<'pat, 'tcx> {
- // Force the place type to the pattern's type.
- // FIXME(oli-obk): can we use this to simplify slice/array pattern hacks?
- if let Some(resolved) = place_builder.resolve_upvar(cx) {
- place_builder = resolved;
- }
-
- // Only add the OpaqueCast projection if the given place is an opaque type and the
- // expected type from the pattern is not.
- let may_need_cast = match place_builder.base() {
- PlaceBase::Local(local) => {
- let ty =
- Place::ty_from(local, place_builder.projection(), &cx.local_decls, cx.tcx).ty;
- ty != pattern.ty && ty.has_opaque_types()
- }
- _ => true,
- };
- if may_need_cast {
- place_builder = place_builder.project(ProjectionElem::OpaqueCast(pattern.ty));
- }
-
- let place = place_builder.try_to_place(cx);
- let default_irrefutable = || TestCase::Irrefutable { binding: None, ascription: None };
- let mut subpairs = Vec::new();
- let test_case = match pattern.kind {
- PatKind::Wild | PatKind::Error(_) => default_irrefutable(),
-
- PatKind::Or { ref pats } => TestCase::Or {
- pats: pats.iter().map(|pat| FlatPat::new(place_builder.clone(), pat, cx)).collect(),
- },
-
- PatKind::Range(ref range) => {
- if range.is_full_range(cx.tcx) == Some(true) {
- default_irrefutable()
- } else {
- TestCase::Range(range)
- }
- }
-
- PatKind::Constant { value } => TestCase::Constant { value },
-
- PatKind::AscribeUserType {
- ascription: thir::Ascription { ref annotation, variance },
- ref subpattern,
- ..
- } => {
- // Apply the type ascription to the value at `match_pair.place`
- let ascription = place.map(|source| super::Ascription {
- annotation: annotation.clone(),
- source,
- variance,
- });
-
- subpairs.push(MatchPair::new(place_builder, subpattern, cx));
- TestCase::Irrefutable { ascription, binding: None }
- }
-
- PatKind::Binding { mode, var, ref subpattern, .. } => {
- let binding = place.map(|source| super::Binding {
- span: pattern.span,
- source,
- var_id: var,
- binding_mode: mode,
- });
-
- if let Some(subpattern) = subpattern.as_ref() {
- // this is the `x @ P` case; have to keep matching against `P` now
- subpairs.push(MatchPair::new(place_builder, subpattern, cx));
- }
- TestCase::Irrefutable { ascription: None, binding }
- }
-
- PatKind::InlineConstant { subpattern: ref pattern, def, .. } => {
- // Apply a type ascription for the inline constant to the value at `match_pair.place`
- let ascription = place.map(|source| {
- let span = pattern.span;
- let parent_id = cx.tcx.typeck_root_def_id(cx.def_id.to_def_id());
- let args = ty::InlineConstArgs::new(
- cx.tcx,
- ty::InlineConstArgsParts {
- parent_args: ty::GenericArgs::identity_for_item(cx.tcx, parent_id),
- ty: cx.infcx.next_ty_var(span),
- },
- )
- .args;
- let user_ty = cx.infcx.canonicalize_user_type_annotation(ty::UserType::TypeOf(
- def.to_def_id(),
- ty::UserArgs { args, user_self_ty: None },
- ));
- let annotation = ty::CanonicalUserTypeAnnotation {
- inferred_ty: pattern.ty,
- span,
- user_ty: Box::new(user_ty),
- };
- super::Ascription { annotation, source, variance: ty::Contravariant }
- });
-
- subpairs.push(MatchPair::new(place_builder, pattern, cx));
- TestCase::Irrefutable { ascription, binding: None }
- }
-
- PatKind::Array { ref prefix, ref slice, ref suffix } => {
- cx.prefix_slice_suffix(&mut subpairs, &place_builder, prefix, slice, suffix);
- default_irrefutable()
- }
- PatKind::Slice { ref prefix, ref slice, ref suffix } => {
- cx.prefix_slice_suffix(&mut subpairs, &place_builder, prefix, slice, suffix);
-
- if prefix.is_empty() && slice.is_some() && suffix.is_empty() {
- default_irrefutable()
- } else {
- TestCase::Slice {
- len: prefix.len() + suffix.len(),
- variable_length: slice.is_some(),
- }
- }
- }
-
- PatKind::Variant { adt_def, variant_index, args, ref subpatterns } => {
- let downcast_place = place_builder.downcast(adt_def, variant_index); // `(x as Variant)`
- subpairs = cx.field_match_pairs(downcast_place, subpatterns);
-
- let irrefutable = adt_def.variants().iter_enumerated().all(|(i, v)| {
- i == variant_index || {
- (cx.tcx.features().exhaustive_patterns
- || cx.tcx.features().min_exhaustive_patterns)
- && !v
- .inhabited_predicate(cx.tcx, adt_def)
- .instantiate(cx.tcx, args)
- .apply_ignore_module(cx.tcx, cx.param_env)
- }
- }) && (adt_def.did().is_local()
- || !adt_def.is_variant_list_non_exhaustive());
- if irrefutable {
- default_irrefutable()
- } else {
- TestCase::Variant { adt_def, variant_index }
- }
- }
-
- PatKind::Leaf { ref subpatterns } => {
- subpairs = cx.field_match_pairs(place_builder, subpatterns);
- default_irrefutable()
- }
-
- PatKind::Deref { ref subpattern } => {
- subpairs.push(MatchPair::new(place_builder.deref(), subpattern, cx));
- default_irrefutable()
- }
-
- PatKind::DerefPattern { ref subpattern, mutability } => {
- // Create a new temporary for each deref pattern.
- // FIXME(deref_patterns): dedup temporaries to avoid multiple `deref()` calls?
- let temp = cx.temp(
- Ty::new_ref(cx.tcx, cx.tcx.lifetimes.re_erased, subpattern.ty, mutability),
- pattern.span,
- );
- subpairs.push(MatchPair::new(PlaceBuilder::from(temp).deref(), subpattern, cx));
- TestCase::Deref { temp, mutability }
- }
-
- PatKind::Never => TestCase::Never,
- };
-
- MatchPair { place, test_case, subpairs, pattern }
- }
-}
-
/// Determine the set of places that have to be stable across match guards.
///
/// Returns a list of places that need a fake borrow along with a local to store it.
diff --git a/compiler/rustc_mir_dataflow/src/framework/mod.rs b/compiler/rustc_mir_dataflow/src/framework/mod.rs
index 09cdb055a3e8..6eaed0f77533 100644
--- a/compiler/rustc_mir_dataflow/src/framework/mod.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/mod.rs
@@ -402,7 +402,7 @@ where
/// building up a `GenKillSet` and then throwing it away.
pub trait GenKill<T> {
/// Inserts `elem` into the state vector.
- fn gen(&mut self, elem: T);
+ fn gen_(&mut self, elem: T);
/// Removes `elem` from the state vector.
fn kill(&mut self, elem: T);
@@ -410,7 +410,7 @@ pub trait GenKill {
/// Calls `gen` for each element in `elems`.
fn gen_all(&mut self, elems: impl IntoIterator<Item = T>) {
for elem in elems {
- self.gen(elem);
+ self.gen_(elem);
}
}
@@ -424,12 +424,12 @@ pub trait GenKill {
/// Stores a transfer function for a gen/kill problem.
///
-/// Calling `gen`/`kill` on a `GenKillSet` will "build up" a transfer function so that it can be
-/// applied multiple times efficiently. When there are multiple calls to `gen` and/or `kill` for
+/// Calling `gen_`/`kill` on a `GenKillSet` will "build up" a transfer function so that it can be
+/// applied multiple times efficiently. When there are multiple calls to `gen_` and/or `kill` for
/// the same element, the most recent one takes precedence.
#[derive(Clone)]
pub struct GenKillSet<T> {
- gen: HybridBitSet<T>,
+ gen_: HybridBitSet<T>,
kill: HybridBitSet<T>,
}
@@ -437,31 +437,31 @@ impl GenKillSet {
/// Creates a new transfer function that will leave the dataflow state unchanged.
pub fn identity(universe: usize) -> Self {
GenKillSet {
- gen: HybridBitSet::new_empty(universe),
+ gen_: HybridBitSet::new_empty(universe),
kill: HybridBitSet::new_empty(universe),
}
}
pub fn apply(&self, state: &mut impl BitSetExt<T>) {
- state.union(&self.gen);
+ state.union(&self.gen_);
state.subtract(&self.kill);
}
}
impl<T: Idx> GenKill<T> for GenKillSet<T> {
- fn gen(&mut self, elem: T) {
- self.gen.insert(elem);
+ fn gen_(&mut self, elem: T) {
+ self.gen_.insert(elem);
self.kill.remove(elem);
}
fn kill(&mut self, elem: T) {
self.kill.insert(elem);
- self.gen.remove(elem);
+ self.gen_.remove(elem);
}
}
impl<T: Idx> GenKill<T> for BitSet<T> {
- fn gen(&mut self, elem: T) {
+ fn gen_(&mut self, elem: T) {
self.insert(elem);
}
@@ -471,7 +471,7 @@ impl GenKill for BitSet {
}
impl<T: Idx> GenKill<T> for ChunkedBitSet<T> {
- fn gen(&mut self, elem: T) {
+ fn gen_(&mut self, elem: T) {
self.insert(elem);
}
@@ -481,11 +481,11 @@ impl GenKill for ChunkedBitSet {
}
impl<T, S: GenKill<T>> GenKill<T> for MaybeReachable<S> {
- fn gen(&mut self, elem: T) {
+ fn gen_(&mut self, elem: T) {
match self {
// If the state is not reachable, adding an element does nothing.
MaybeReachable::Unreachable => {}
- MaybeReachable::Reachable(set) => set.gen(elem),
+ MaybeReachable::Reachable(set) => set.gen_(elem),
}
}
@@ -499,7 +499,7 @@ impl> GenKill for MaybeReachable {
}
impl<T: Idx> GenKill<T> for lattice::Dual<BitSet<T>> {
- fn gen(&mut self, elem: T) {
+ fn gen_(&mut self, elem: T) {
self.0.insert(elem);
}
diff --git a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
index 574da949b0ed..885fdd0d58be 100644
--- a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
@@ -97,7 +97,7 @@ where
Rvalue::AddressOf(_, borrowed_place)
| Rvalue::Ref(_, BorrowKind::Mut { .. } | BorrowKind::Shared, borrowed_place) => {
if !borrowed_place.is_indirect() {
- self.trans.gen(borrowed_place.local);
+ self.trans.gen_(borrowed_place.local);
}
}
@@ -131,7 +131,7 @@ where
//
// [#61069]: https://github.com/rust-lang/rust/pull/61069
if !dropped_place.is_indirect() {
- self.trans.gen(dropped_place.local);
+ self.trans.gen_(dropped_place.local);
}
}
@@ -159,8 +159,8 @@ pub fn borrowed_locals(body: &Body<'_>) -> BitSet {
impl GenKill<Local> for Borrowed {
#[inline]
- fn gen(&mut self, elem: Local) {
- self.0.gen(elem)
+ fn gen_(&mut self, elem: Local) {
+ self.0.gen_(elem)
}
#[inline]
fn kill(&mut self, _: Local) {
diff --git a/compiler/rustc_mir_dataflow/src/impls/initialized.rs b/compiler/rustc_mir_dataflow/src/impls/initialized.rs
index ffcf630b653c..a9bceeccdce2 100644
--- a/compiler/rustc_mir_dataflow/src/impls/initialized.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/initialized.rs
@@ -283,7 +283,7 @@ impl<'a, 'mir, 'tcx> MaybeInitializedPlaces<'a, 'mir, 'tcx> {
) {
match state {
DropFlagState::Absent => trans.kill(path),
- DropFlagState::Present => trans.gen(path),
+ DropFlagState::Present => trans.gen_(path),
}
}
}
@@ -295,7 +295,7 @@ impl<'a, 'tcx> MaybeUninitializedPlaces<'a, '_, 'tcx> {
state: DropFlagState,
) {
match state {
- DropFlagState::Absent => trans.gen(path),
+ DropFlagState::Absent => trans.gen_(path),
DropFlagState::Present => trans.kill(path),
}
}
@@ -309,7 +309,7 @@ impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
) {
match state {
DropFlagState::Absent => trans.kill(path),
- DropFlagState::Present => trans.gen(path),
+ DropFlagState::Present => trans.gen_(path),
}
}
}
@@ -331,7 +331,7 @@ impl<'tcx> AnalysisDomain<'tcx> for MaybeInitializedPlaces<'_, '_, 'tcx> {
MaybeReachable::Reachable(ChunkedBitSet::new_empty(self.move_data().move_paths.len()));
drop_flag_effects_for_function_entry(self.body, self.mdpe, |path, s| {
assert!(s == DropFlagState::Present);
- state.gen(path);
+ state.gen_(path);
});
}
}
@@ -362,7 +362,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeInitializedPlaces<'_, '_, 'tcx> {
&& let LookupResult::Exact(mpi) = self.move_data().rev_lookup.find(place.as_ref())
{
on_all_children_bits(self.move_data(), mpi, |child| {
- trans.gen(child);
+ trans.gen_(child);
})
}
}
@@ -400,7 +400,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeInitializedPlaces<'_, '_, 'tcx> {
self.move_data(),
self.move_data().rev_lookup.find(place.as_ref()),
|mpi| {
- trans.gen(mpi);
+ trans.gen_(mpi);
},
);
});
@@ -572,7 +572,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeUninitializedPlaces<'_, '_, 'tcx> {
self.move_data(),
enum_place,
variant,
- |mpi| trans.gen(mpi),
+ |mpi| trans.gen_(mpi),
);
});
}
@@ -643,7 +643,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for DefinitelyInitializedPlaces<'_, 'tcx> {
self.move_data(),
self.move_data().rev_lookup.find(place.as_ref()),
|mpi| {
- trans.gen(mpi);
+ trans.gen_(mpi);
},
);
});
@@ -738,7 +738,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for EverInitializedPlaces<'_, '_, 'tcx> {
let call_loc = self.body.terminator_loc(block);
for init_index in &init_loc_map[call_loc] {
- trans.gen(*init_index);
+ trans.gen_(*init_index);
}
}
}
diff --git a/compiler/rustc_mir_dataflow/src/impls/liveness.rs b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
index 334fa9976f03..48bdb1316012 100644
--- a/compiler/rustc_mir_dataflow/src/impls/liveness.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
@@ -116,7 +116,7 @@ where
self.0.kill(place.local);
}
}
- Some(DefUse::Use) => self.0.gen(place.local),
+ Some(DefUse::Use) => self.0.gen_(place.local),
None => {}
}
@@ -154,7 +154,7 @@ impl DefUse {
fn apply(trans: &mut impl GenKill<Local>, place: Place<'_>, context: PlaceContext) {
match DefUse::for_place(place, context) {
Some(DefUse::Def) => trans.kill(place.local),
- Some(DefUse::Use) => trans.gen(place.local),
+ Some(DefUse::Use) => trans.gen_(place.local),
None => {}
}
}
diff --git a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
index f850a7102773..682cec12f1fb 100644
--- a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
@@ -54,7 +54,7 @@ impl<'tcx, 'a> crate::GenKillAnalysis<'tcx> for MaybeStorageLive<'a> {
_: Location,
) {
match stmt.kind {
- StatementKind::StorageLive(l) => trans.gen(l),
+ StatementKind::StorageLive(l) => trans.gen_(l),
StatementKind::StorageDead(l) => trans.kill(l),
_ => (),
}
@@ -127,7 +127,7 @@ impl<'tcx, 'a> crate::GenKillAnalysis<'tcx> for MaybeStorageDead<'a> {
) {
match stmt.kind {
StatementKind::StorageLive(l) => trans.kill(l),
- StatementKind::StorageDead(l) => trans.gen(l),
+ StatementKind::StorageDead(l) => trans.gen_(l),
_ => (),
}
}
@@ -208,7 +208,7 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, 'tcx> {
StatementKind::Assign(box (place, _))
| StatementKind::SetDiscriminant { box place, .. }
| StatementKind::Deinit(box place) => {
- trans.gen(place.local);
+ trans.gen_(place.local);
}
// Nothing to do for these. Match exhaustively so this fails to compile when new
@@ -250,7 +250,7 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, 'tcx> {
match &terminator.kind {
TerminatorKind::Call { destination, .. } => {
- trans.gen(destination.local);
+ trans.gen_(destination.local);
}
// Note that we do *not* gen the `resume_arg` of `Yield` terminators. The reason for
@@ -265,7 +265,7 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, 'tcx> {
InlineAsmOperand::Out { place, .. }
| InlineAsmOperand::InOut { out_place: place, .. } => {
if let Some(place) = place {
- trans.gen(place.local);
+ trans.gen_(place.local);
}
}
InlineAsmOperand::In { .. }
@@ -341,7 +341,7 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, 'tcx> {
_block: BasicBlock,
return_places: CallReturnPlaces<'_, 'tcx>,
) {
- return_places.for_each(|place| trans.gen(place.local));
+ return_places.for_each(|place| trans.gen_(place.local));
}
}
diff --git a/compiler/rustc_mir_dataflow/src/value_analysis.rs b/compiler/rustc_mir_dataflow/src/value_analysis.rs
index 1582c2e8a906..c9f5d38fe2c1 100644
--- a/compiler/rustc_mir_dataflow/src/value_analysis.rs
+++ b/compiler/rustc_mir_dataflow/src/value_analysis.rs
@@ -32,15 +32,16 @@
//! Because of that, we can assume that the only way to change the value behind a tracked place is
//! by direct assignment.
-use std::collections::VecDeque;
use std::fmt::{Debug, Formatter};
use std::ops::Range;
-use rustc_data_structures::fx::{FxHashMap, StdEntry};
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::{FxHashMap, FxIndexSet, StdEntry};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::bug;
+use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Ty, TyCtxt};
@@ -58,7 +59,7 @@ pub trait ValueAnalysis<'tcx> {
const NAME: &'static str;
- fn map(&self) -> &Map;
+ fn map(&self) -> &Map<'tcx>;
fn handle_statement(&self, statement: &Statement<'tcx>, state: &mut State) {
self.super_statement(statement, state)
@@ -523,12 +524,12 @@ impl State {
}
/// Assign `value` to all places that are contained in `place` or may alias one.
- pub fn flood_with(&mut self, place: PlaceRef<'_>, map: &Map, value: V) {
+ pub fn flood_with(&mut self, place: PlaceRef<'_>, map: &Map<'_>, value: V) {
self.flood_with_tail_elem(place, None, map, value)
}
/// Assign `TOP` to all places that are contained in `place` or may alias one.
- pub fn flood(&mut self, place: PlaceRef<'_>, map: &Map)
+ pub fn flood(&mut self, place: PlaceRef<'_>, map: &Map<'_>)
where
V: HasTop,
{
@@ -536,12 +537,12 @@ impl State {
}
/// Assign `value` to the discriminant of `place` and all places that may alias it.
- fn flood_discr_with(&mut self, place: PlaceRef<'_>, map: &Map, value: V) {
+ fn flood_discr_with(&mut self, place: PlaceRef<'_>, map: &Map<'_>, value: V) {
self.flood_with_tail_elem(place, Some(TrackElem::Discriminant), map, value)
}
/// Assign `TOP` to the discriminant of `place` and all places that may alias it.
- pub fn flood_discr(&mut self, place: PlaceRef<'_>, map: &Map)
+ pub fn flood_discr(&mut self, place: PlaceRef<'_>, map: &Map<'_>)
where
V: HasTop,
{
@@ -559,7 +560,7 @@ impl State {
&mut self,
place: PlaceRef<'_>,
tail_elem: Option<TrackElem>,
- map: &Map,
+ map: &Map<'_>,
value: V,
) {
let State::Reachable(values) = self else { return };
@@ -570,7 +571,7 @@ impl State {
/// This does nothing if the place is not tracked.
///
/// The target place must have been flooded before calling this method.
- fn insert_idx(&mut self, target: PlaceIndex, result: ValueOrPlace<V>, map: &Map) {
+ fn insert_idx(&mut self, target: PlaceIndex, result: ValueOrPlace<V>, map: &Map<'_>) {
match result {
ValueOrPlace::Value(value) => self.insert_value_idx(target, value, map),
ValueOrPlace::Place(source) => self.insert_place_idx(target, source, map),
@@ -581,7 +582,7 @@ impl State {
/// This does nothing if the place is not tracked.
///
/// The target place must have been flooded before calling this method.
- pub fn insert_value_idx(&mut self, target: PlaceIndex, value: V, map: &Map) {
+ pub fn insert_value_idx(&mut self, target: PlaceIndex, value: V, map: &Map<'_>) {
let State::Reachable(values) = self else { return };
if let Some(value_index) = map.places[target].value_index {
values.insert(value_index, value)
@@ -595,7 +596,7 @@ impl State {
/// places that are non-overlapping or identical.
///
/// The target place must have been flooded before calling this method.
- pub fn insert_place_idx(&mut self, target: PlaceIndex, source: PlaceIndex, map: &Map) {
+ pub fn insert_place_idx(&mut self, target: PlaceIndex, source: PlaceIndex, map: &Map<'_>) {
let State::Reachable(values) = self else { return };
// If both places are tracked, we copy the value to the target.
@@ -616,7 +617,7 @@ impl State {
}
/// Helper method to interpret `target = result`.
- pub fn assign(&mut self, target: PlaceRef<'_>, result: ValueOrPlace<V>, map: &Map)
+ pub fn assign(&mut self, target: PlaceRef<'_>, result: ValueOrPlace<V>, map: &Map<'_>)
where
V: HasTop,
{
@@ -627,7 +628,7 @@ impl State {
}
/// Helper method for assignments to a discriminant.
- pub fn assign_discr(&mut self, target: PlaceRef<'_>, result: ValueOrPlace<V>, map: &Map)
+ pub fn assign_discr(&mut self, target: PlaceRef<'_>, result: ValueOrPlace<V>, map: &Map<'_>)
where
V: HasTop,
{
@@ -638,25 +639,25 @@ impl State {
}
/// Retrieve the value stored for a place, or `None` if it is not tracked.
- pub fn try_get(&self, place: PlaceRef<'_>, map: &Map) -> Option<V> {
+ pub fn try_get(&self, place: PlaceRef<'_>, map: &Map<'_>) -> Option<V> {
let place = map.find(place)?;
self.try_get_idx(place, map)
}
/// Retrieve the discriminant stored for a place, or `None` if it is not tracked.
- pub fn try_get_discr(&self, place: PlaceRef<'_>, map: &Map) -> Option<V> {
+ pub fn try_get_discr(&self, place: PlaceRef<'_>, map: &Map<'_>) -> Option<V> {
let place = map.find_discr(place)?;
self.try_get_idx(place, map)
}
/// Retrieve the slice length stored for a place, or `None` if it is not tracked.
- pub fn try_get_len(&self, place: PlaceRef<'_>, map: &Map) -> Option<V> {
+ pub fn try_get_len(&self, place: PlaceRef<'_>, map: &Map<'_>) -> Option<V> {
let place = map.find_len(place)?;
self.try_get_idx(place, map)
}
/// Retrieve the value stored for a place index, or `None` if it is not tracked.
- pub fn try_get_idx(&self, place: PlaceIndex, map: &Map) -> Option<V> {
+ pub fn try_get_idx(&self, place: PlaceIndex, map: &Map<'_>) -> Option<V> {
match self {
State::Reachable(values) => {
map.places[place].value_index.map(|v| values.get(v).clone())
@@ -668,7 +669,7 @@ impl State {
/// Retrieve the value stored for a place, or ⊤ if it is not tracked.
///
/// This method returns ⊥ if the place is tracked and the state is unreachable.
- pub fn get(&self, place: PlaceRef<'_>, map: &Map) -> V
+ pub fn get(&self, place: PlaceRef<'_>, map: &Map<'_>) -> V
where
V: HasBottom + HasTop,
{
@@ -682,7 +683,7 @@ impl State {
/// Retrieve the value stored for a place, or ⊤ if it is not tracked.
///
/// This method returns ⊥ the current state is unreachable.
- pub fn get_discr(&self, place: PlaceRef<'_>, map: &Map) -> V
+ pub fn get_discr(&self, place: PlaceRef<'_>, map: &Map<'_>) -> V
where
V: HasBottom + HasTop,
{
@@ -696,7 +697,7 @@ impl State {
/// Retrieve the value stored for a place, or ⊤ if it is not tracked.
///
/// This method returns ⊥ the current state is unreachable.
- pub fn get_len(&self, place: PlaceRef<'_>, map: &Map) -> V
+ pub fn get_len(&self, place: PlaceRef<'_>, map: &Map<'_>) -> V
where
V: HasBottom + HasTop,
{
@@ -710,7 +711,7 @@ impl State {
/// Retrieve the value stored for a place index, or ⊤ if it is not tracked.
///
/// This method returns ⊥ the current state is unreachable.
- pub fn get_idx(&self, place: PlaceIndex, map: &Map) -> V
+ pub fn get_idx(&self, place: PlaceIndex, map: &Map<'_>) -> V
where
V: HasBottom + HasTop,
{
@@ -746,25 +747,25 @@ impl JoinSemiLattice for State {
/// - For iteration, every [`PlaceInfo`] contains an intrusive linked list of its children.
/// - To directly get the child for a specific projection, there is a `projections` map.
#[derive(Debug)]
-pub struct Map {
+pub struct Map<'tcx> {
locals: IndexVec<Local, Option<PlaceIndex>>,
projections: FxHashMap<(PlaceIndex, TrackElem), PlaceIndex>,
- places: IndexVec<PlaceIndex, PlaceInfo>,
+ places: IndexVec<PlaceIndex, PlaceInfo<'tcx>>,
value_count: usize,
// The Range corresponds to a slice into `inner_values_buffer`.
inner_values: IndexVec<PlaceIndex, Range<usize>>,
inner_values_buffer: Vec<ValueIndex>,
}
-impl Map {
+impl<'tcx> Map<'tcx> {
/// Returns a map that only tracks places whose type has scalar layout.
///
/// This is currently the only way to create a [`Map`]. The way in which the tracked places are
/// chosen is an implementation detail and may not be relied upon (other than that their type
/// are scalars).
- pub fn new<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, value_limit: Option<usize>) -> Self {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, value_limit: Option<usize>) -> Self {
let mut map = Self {
- locals: IndexVec::new(),
+ locals: IndexVec::from_elem(None, &body.local_decls),
projections: FxHashMap::default(),
places: IndexVec::new(),
value_count: 0,
@@ -778,18 +779,15 @@ impl Map {
}
/// Register all non-excluded places that have scalar layout.
- fn register<'tcx>(
+ #[tracing::instrument(level = "trace", skip(self, tcx, body))]
+ fn register(
&mut self,
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
exclude: BitSet<Local>,
value_limit: Option<usize>,
) {
- let mut worklist = VecDeque::with_capacity(value_limit.unwrap_or(body.local_decls.len()));
- let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
-
// Start by constructing the places for each bare local.
- self.locals = IndexVec::from_elem(None, &body.local_decls);
for (local, decl) in body.local_decls.iter_enumerated() {
if exclude.contains(local) {
continue;
@@ -797,16 +795,60 @@ impl Map {
// Create a place for the local.
debug_assert!(self.locals[local].is_none());
- let place = self.places.push(PlaceInfo::new(None));
+ let place = self.places.push(PlaceInfo::new(decl.ty, None));
self.locals[local] = Some(place);
-
- // And push the eventual children places to the worklist.
- self.register_children(tcx, param_env, place, decl.ty, &mut worklist);
}
- // `place.elem1.elem2` with type `ty`.
- // `elem1` is either `Some(Variant(i))` or `None`.
- while let Some((mut place, elem1, elem2, ty)) = worklist.pop_front() {
+ // Collect syntactic places and assignments between them.
+ let mut collector =
+ PlaceCollector { tcx, body, map: self, assignments: Default::default() };
+ collector.visit_body(body);
+ let PlaceCollector { mut assignments, .. } = collector;
+
+ // Just collecting syntactic places is not enough. We may need to propagate this pattern:
+ // _1 = (const 5u32, const 13i64);
+ // _2 = _1;
+ // _3 = (_2.0 as u32);
+ //
+ // `_1.0` does not appear, but we still need to track it. This is achieved by propagating
+ // projections from assignments. We recorded an assignment between `_2` and `_1`, so we
+ // want `_1` and `_2` to have the same sub-places.
+ //
+ // This is what this fixpoint loop does. While we are still creating places, run through
+ // all the assignments, and register places for children.
+ let mut num_places = 0;
+ while num_places < self.places.len() {
+ num_places = self.places.len();
+
+ for assign in 0.. {
+ let Some(&(lhs, rhs)) = assignments.get_index(assign) else { break };
+
+ // Mirror children from `lhs` in `rhs`.
+ let mut child = self.places[lhs].first_child;
+ while let Some(lhs_child) = child {
+ let PlaceInfo { ty, proj_elem, next_sibling, .. } = self.places[lhs_child];
+ let rhs_child =
+ self.register_place(ty, rhs, proj_elem.expect("child is not a projection"));
+ assignments.insert((lhs_child, rhs_child));
+ child = next_sibling;
+ }
+
+ // Conversely, mirror children from `rhs` in `lhs`.
+ let mut child = self.places[rhs].first_child;
+ while let Some(rhs_child) = child {
+ let PlaceInfo { ty, proj_elem, next_sibling, .. } = self.places[rhs_child];
+ let lhs_child =
+ self.register_place(ty, lhs, proj_elem.expect("child is not a projection"));
+ assignments.insert((lhs_child, rhs_child));
+ child = next_sibling;
+ }
+ }
+ }
+ drop(assignments);
+
+ // Create values for places whose type have scalar layout.
+ let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
+ for place_info in self.places.iter_mut() {
// The user requires a bound on the number of created values.
if let Some(value_limit) = value_limit
&& self.value_count >= value_limit
@@ -814,19 +856,18 @@ impl Map {
break;
}
- // Create a place for this projection.
- for elem in [elem1, Some(elem2)].into_iter().flatten() {
- place = *self.projections.entry((place, elem)).or_insert_with(|| {
- // Prepend new child to the linked list.
- let next = self.places.push(PlaceInfo::new(Some(elem)));
- self.places[next].next_sibling = self.places[place].first_child;
- self.places[place].first_child = Some(next);
- next
- });
+ if let Ok(ty) = tcx.try_normalize_erasing_regions(param_env, place_info.ty) {
+ place_info.ty = ty;
}
- // And push the eventual children places to the worklist.
- self.register_children(tcx, param_env, place, ty, &mut worklist);
+ // Allocate a value slot if it doesn't have one, and the user requested one.
+ assert!(place_info.value_index.is_none());
+ if let Ok(layout) = tcx.layout_of(param_env.and(place_info.ty))
+ && layout.abi.is_scalar()
+ {
+ place_info.value_index = Some(self.value_count.into());
+ self.value_count += 1;
+ }
}
// Pre-compute the tree of ValueIndex nested in each PlaceIndex.
@@ -852,68 +893,14 @@ impl Map {
self.projections.retain(|_, child| !self.inner_values[*child].is_empty());
}
- /// Potentially register the (local, projection) place and its fields, recursively.
- ///
- /// Invariant: The projection must only contain trackable elements.
- fn register_children<'tcx>(
- &mut self,
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- place: PlaceIndex,
- ty: Ty<'tcx>,
- worklist: &mut VecDeque<(PlaceIndex, Option, TrackElem, Ty<'tcx>)>,
- ) {
- // Allocate a value slot if it doesn't have one, and the user requested one.
- assert!(self.places[place].value_index.is_none());
- if tcx.layout_of(param_env.and(ty)).is_ok_and(|layout| layout.abi.is_scalar()) {
- self.places[place].value_index = Some(self.value_count.into());
- self.value_count += 1;
- }
-
- // For enums, directly create the `Discriminant`, as that's their main use.
- if ty.is_enum() {
- // Prepend new child to the linked list.
- let discr = self.places.push(PlaceInfo::new(Some(TrackElem::Discriminant)));
- self.places[discr].next_sibling = self.places[place].first_child;
- self.places[place].first_child = Some(discr);
- let old = self.projections.insert((place, TrackElem::Discriminant), discr);
- assert!(old.is_none());
-
- // Allocate a value slot since it doesn't have one.
- assert!(self.places[discr].value_index.is_none());
- self.places[discr].value_index = Some(self.value_count.into());
- self.value_count += 1;
- }
-
- if let ty::Ref(_, ref_ty, _) | ty::RawPtr(ref_ty, _) = ty.kind()
- && let ty::Slice(..) = ref_ty.kind()
- // The user may have written a predicate like `[T]: Sized` in their where clauses,
- // which makes slices scalars.
- && self.places[place].value_index.is_none()
- {
- // Prepend new child to the linked list.
- let len = self.places.push(PlaceInfo::new(Some(TrackElem::DerefLen)));
- self.places[len].next_sibling = self.places[place].first_child;
- self.places[place].first_child = Some(len);
-
- let old = self.projections.insert((place, TrackElem::DerefLen), len);
- assert!(old.is_none());
-
- // Allocate a value slot since it doesn't have one.
- assert!(self.places[len].value_index.is_none());
- self.places[len].value_index = Some(self.value_count.into());
- self.value_count += 1;
- }
-
- // Recurse with all fields of this place.
- iter_fields(ty, tcx, param_env, |variant, field, ty| {
- worklist.push_back((
- place,
- variant.map(TrackElem::Variant),
- TrackElem::Field(field),
- ty,
- ))
- });
+ #[tracing::instrument(level = "trace", skip(self), ret)]
+ fn register_place(&mut self, ty: Ty<'tcx>, base: PlaceIndex, elem: TrackElem) -> PlaceIndex {
+ *self.projections.entry((base, elem)).or_insert_with(|| {
+ let next = self.places.push(PlaceInfo::new(ty, Some(elem)));
+ self.places[next].next_sibling = self.places[base].first_child;
+ self.places[base].first_child = Some(next);
+ next
+ })
}
/// Precompute the list of values inside `root` and store it inside
@@ -934,7 +921,108 @@ impl Map {
let end = self.inner_values_buffer.len();
self.inner_values[root] = start..end;
}
+}
+struct PlaceCollector<'a, 'b, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'b Body<'tcx>,
+ map: &'a mut Map<'tcx>,
+ assignments: FxIndexSet<(PlaceIndex, PlaceIndex)>,
+}
+
+impl<'tcx> PlaceCollector<'_, '_, 'tcx> {
+ #[tracing::instrument(level = "trace", skip(self))]
+ fn register_place(&mut self, place: Place<'tcx>) -> Option {
+ // Create a place for this projection.
+ let mut place_index = self.map.locals[place.local]?;
+ let mut ty = PlaceTy::from_ty(self.body.local_decls[place.local].ty);
+ tracing::trace!(?place_index, ?ty);
+
+ if let ty::Ref(_, ref_ty, _) | ty::RawPtr(ref_ty, _) = ty.ty.kind()
+ && let ty::Slice(..) = ref_ty.kind()
+ {
+ self.map.register_place(self.tcx.types.usize, place_index, TrackElem::DerefLen);
+ } else if ty.ty.is_enum() {
+ let discriminant_ty = ty.ty.discriminant_ty(self.tcx);
+ self.map.register_place(discriminant_ty, place_index, TrackElem::Discriminant);
+ }
+
+ for proj in place.projection {
+ let track_elem = proj.try_into().ok()?;
+ ty = ty.projection_ty(self.tcx, proj);
+ place_index = self.map.register_place(ty.ty, place_index, track_elem);
+ tracing::trace!(?proj, ?place_index, ?ty);
+
+ if let ty::Ref(_, ref_ty, _) | ty::RawPtr(ref_ty, _) = ty.ty.kind()
+ && let ty::Slice(..) = ref_ty.kind()
+ {
+ self.map.register_place(self.tcx.types.usize, place_index, TrackElem::DerefLen);
+ } else if ty.ty.is_enum() {
+ let discriminant_ty = ty.ty.discriminant_ty(self.tcx);
+ self.map.register_place(discriminant_ty, place_index, TrackElem::Discriminant);
+ }
+ }
+
+ Some(place_index)
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for PlaceCollector<'_, '_, 'tcx> {
+ #[tracing::instrument(level = "trace", skip(self))]
+ fn visit_place(&mut self, place: &Place<'tcx>, ctxt: PlaceContext, _: Location) {
+ if !ctxt.is_use() {
+ return;
+ }
+
+ self.register_place(*place);
+ }
+
+ fn visit_assign(&mut self, lhs: &Place<'tcx>, rhs: &Rvalue<'tcx>, location: Location) {
+ self.super_assign(lhs, rhs, location);
+
+ match rhs {
+ Rvalue::Use(Operand::Move(rhs) | Operand::Copy(rhs)) | Rvalue::CopyForDeref(rhs) => {
+ let Some(lhs) = self.register_place(*lhs) else { return };
+ let Some(rhs) = self.register_place(*rhs) else { return };
+ self.assignments.insert((lhs, rhs));
+ }
+ Rvalue::Aggregate(kind, fields) => {
+ let Some(mut lhs) = self.register_place(*lhs) else { return };
+ match **kind {
+ // Do not propagate unions.
+ AggregateKind::Adt(_, _, _, _, Some(_)) => return,
+ AggregateKind::Adt(_, variant, _, _, None) => {
+ let ty = self.map.places[lhs].ty;
+ if ty.is_enum() {
+ lhs = self.map.register_place(ty, lhs, TrackElem::Variant(variant));
+ }
+ }
+ AggregateKind::RawPtr(..)
+ | AggregateKind::Array(_)
+ | AggregateKind::Tuple
+ | AggregateKind::Closure(..)
+ | AggregateKind::Coroutine(..)
+ | AggregateKind::CoroutineClosure(..) => {}
+ }
+ for (index, field) in fields.iter_enumerated() {
+ if let Some(rhs) = field.place()
+ && let Some(rhs) = self.register_place(rhs)
+ {
+ let lhs = self.map.register_place(
+ self.map.places[rhs].ty,
+ lhs,
+ TrackElem::Field(index),
+ );
+ self.assignments.insert((lhs, rhs));
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+}
+
+impl<'tcx> Map<'tcx> {
/// Applies a single projection element, yielding the corresponding child.
pub fn apply(&self, place: PlaceIndex, elem: TrackElem) -> Option {
self.projections.get(&(place, elem)).copied()
@@ -974,7 +1062,10 @@ impl Map {
}
/// Iterate over all direct children.
- fn children(&self, parent: PlaceIndex) -> impl Iterator- + '_ {
+ fn children(
+ &self,
+ parent: PlaceIndex,
+ ) -> impl Iterator
- + Captures<'_> + Captures<'tcx> {
Children::new(self, parent)
}
@@ -1081,7 +1172,10 @@ impl Map {
/// Together, `first_child` and `next_sibling` form an intrusive linked list, which is used to
/// model a tree structure (a replacement for a member like `children: Vec`).
#[derive(Debug)]
-struct PlaceInfo {
+struct PlaceInfo<'tcx> {
+ /// Type of the referenced place.
+ ty: Ty<'tcx>,
+
/// We store a [`ValueIndex`] if and only if the placed is tracked by the analysis.
value_index: Option,
@@ -1095,24 +1189,24 @@ struct PlaceInfo {
next_sibling: Option,
}
-impl PlaceInfo {
- fn new(proj_elem: Option) -> Self {
- Self { next_sibling: None, first_child: None, proj_elem, value_index: None }
+impl<'tcx> PlaceInfo<'tcx> {
+ fn new(ty: Ty<'tcx>, proj_elem: Option) -> Self {
+ Self { ty, next_sibling: None, first_child: None, proj_elem, value_index: None }
}
}
-struct Children<'a> {
- map: &'a Map,
+struct Children<'a, 'tcx> {
+ map: &'a Map<'tcx>,
next: Option,
}
-impl<'a> Children<'a> {
- fn new(map: &'a Map, parent: PlaceIndex) -> Self {
+impl<'a, 'tcx> Children<'a, 'tcx> {
+ fn new(map: &'a Map<'tcx>, parent: PlaceIndex) -> Self {
Self { map, next: map.places[parent].first_child }
}
}
-impl<'a> Iterator for Children<'a> {
+impl Iterator for Children<'_, '_> {
type Item = PlaceIndex;
fn next(&mut self) -> Option {
@@ -1261,7 +1355,7 @@ fn debug_with_context_rec(
place_str: &str,
new: &StateData,
old: Option<&StateData>,
- map: &Map,
+ map: &Map<'_>,
f: &mut Formatter<'_>,
) -> std::fmt::Result {
if let Some(value) = map.places[place].value_index {
@@ -1305,7 +1399,7 @@ fn debug_with_context_rec(
fn debug_with_context(
new: &StateData,
old: Option<&StateData>,
- map: &Map,
+ map: &Map<'_>,
f: &mut Formatter<'_>,
) -> std::fmt::Result {
for (local, place) in map.locals.iter_enumerated() {
diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs
index 25297245172a..2ac08ea85d25 100644
--- a/compiler/rustc_mir_transform/src/coverage/mappings.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs
@@ -56,6 +56,10 @@ pub(super) struct MCDCDecision {
#[derive(Default)]
pub(super) struct ExtractedMappings {
+ /// Store our own copy of [`CoverageGraph::num_nodes`], so that we don't
+ /// need access to the whole graph when allocating per-BCB data. This is
+ /// only public so that other code can still use exhaustive destructuring.
+ pub(super) num_bcbs: usize,
pub(super) code_mappings: Vec,
pub(super) branch_pairs: Vec,
pub(super) mcdc_bitmap_bytes: u32,
@@ -106,6 +110,7 @@ pub(super) fn extract_all_mapping_info_from_mir<'tcx>(
);
ExtractedMappings {
+ num_bcbs: basic_coverage_blocks.num_nodes(),
code_mappings,
branch_pairs,
mcdc_bitmap_bytes,
@@ -115,12 +120,10 @@ pub(super) fn extract_all_mapping_info_from_mir<'tcx>(
}
impl ExtractedMappings {
- pub(super) fn all_bcbs_with_counter_mappings(
- &self,
- basic_coverage_blocks: &CoverageGraph, // Only used for allocating a correctly-sized set
- ) -> BitSet {
+ pub(super) fn all_bcbs_with_counter_mappings(&self) -> BitSet {
// Fully destructure self to make sure we don't miss any fields that have mappings.
let Self {
+ num_bcbs,
code_mappings,
branch_pairs,
mcdc_bitmap_bytes: _,
@@ -129,7 +132,7 @@ impl ExtractedMappings {
} = self;
// Identify which BCBs have one or more mappings.
- let mut bcbs_with_counter_mappings = BitSet::new_empty(basic_coverage_blocks.num_nodes());
+ let mut bcbs_with_counter_mappings = BitSet::new_empty(*num_bcbs);
let mut insert = |bcb| {
bcbs_with_counter_mappings.insert(bcb);
};
@@ -156,6 +159,15 @@ impl ExtractedMappings {
bcbs_with_counter_mappings
}
+
+ /// Returns the set of BCBs that have one or more `Code` mappings.
+ pub(super) fn bcbs_with_ordinary_code_mappings(&self) -> BitSet {
+ let mut bcbs = BitSet::new_empty(self.num_bcbs);
+ for &CodeMapping { span: _, bcb } in &self.code_mappings {
+ bcbs.insert(bcb);
+ }
+ bcbs
+ }
}
fn resolve_block_markers(
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 2efca40d1804..3772a8f51181 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -25,7 +25,7 @@ use rustc_span::source_map::SourceMap;
use rustc_span::{BytePos, Pos, RelativeBytePos, Span, Symbol};
use crate::coverage::counters::{CounterIncrementSite, CoverageCounters};
-use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
+use crate::coverage::graph::CoverageGraph;
use crate::coverage::mappings::ExtractedMappings;
use crate::MirPass;
@@ -88,8 +88,7 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
// every coverage span has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
// and all `Expression` dependencies (operands) are also generated, for any other
// `BasicCoverageBlock`s not already associated with a coverage span.
- let bcbs_with_counter_mappings =
- extracted_mappings.all_bcbs_with_counter_mappings(&basic_coverage_blocks);
+ let bcbs_with_counter_mappings = extracted_mappings.all_bcbs_with_counter_mappings();
if bcbs_with_counter_mappings.is_empty() {
// No relevant spans were found in MIR, so skip instrumenting this function.
return;
@@ -109,7 +108,7 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
inject_coverage_statements(
mir_body,
&basic_coverage_blocks,
- bcb_has_counter_mappings,
+ &extracted_mappings,
&coverage_counters,
);
@@ -163,6 +162,7 @@ fn create_mappings<'tcx>(
// Fully destructure the mappings struct to make sure we don't miss any kinds.
let ExtractedMappings {
+ num_bcbs: _,
code_mappings,
branch_pairs,
mcdc_bitmap_bytes: _,
@@ -219,7 +219,7 @@ fn create_mappings<'tcx>(
fn inject_coverage_statements<'tcx>(
mir_body: &mut mir::Body<'tcx>,
basic_coverage_blocks: &CoverageGraph,
- bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool,
+ extracted_mappings: &ExtractedMappings,
coverage_counters: &CoverageCounters,
) {
// Inject counter-increment statements into MIR.
@@ -252,11 +252,16 @@ fn inject_coverage_statements<'tcx>(
// can check whether the injected statement survived MIR optimization.
// (BCB edges can't have spans, so we only need to process BCB nodes here.)
//
+ // We only do this for ordinary `Code` mappings, because branch and MC/DC
+ // mappings might have expressions that don't correspond to any single
+ // point in the control-flow graph.
+ //
// See the code in `rustc_codegen_llvm::coverageinfo::map_data` that deals
// with "expressions seen" and "zero terms".
+ let eligible_bcbs = extracted_mappings.bcbs_with_ordinary_code_mappings();
for (bcb, expression_id) in coverage_counters
.bcb_nodes_with_coverage_expressions()
- .filter(|&(bcb, _)| bcb_has_coverage_spans(bcb))
+ .filter(|&(bcb, _)| eligible_bcbs.contains(bcb))
{
inject_statement(
mir_body,
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 8b965f4d18e4..8303ef039d18 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -66,7 +66,7 @@ impl<'tcx> MirPass<'tcx> for DataflowConstProp {
}
struct ConstAnalysis<'a, 'tcx> {
- map: Map,
+ map: Map<'tcx>,
tcx: TyCtxt<'tcx>,
local_decls: &'a LocalDecls<'tcx>,
ecx: InterpCx<'tcx, DummyMachine>,
@@ -78,7 +78,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
const NAME: &'static str = "ConstAnalysis";
- fn map(&self) -> &Map {
+ fn map(&self) -> &Map<'tcx> {
&self.map
}
@@ -330,7 +330,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
}
impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
- pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, map: Map) -> Self {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, map: Map<'tcx>) -> Self {
let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
Self {
map,
@@ -560,12 +560,13 @@ impl<'tcx, 'locals> Collector<'tcx, 'locals> {
Self { patch: Patch::new(tcx), local_decls }
}
+ #[instrument(level = "trace", skip(self, ecx, map), ret)]
fn try_make_constant(
&self,
ecx: &mut InterpCx<'tcx, DummyMachine>,
place: Place<'tcx>,
state: &State>,
- map: &Map,
+ map: &Map<'tcx>,
) -> Option> {
let ty = place.ty(self.local_decls, self.patch.tcx).ty;
let layout = ecx.layout_of(ty).ok()?;
@@ -598,10 +599,11 @@ impl<'tcx, 'locals> Collector<'tcx, 'locals> {
}
}
+#[instrument(level = "trace", skip(map), ret)]
fn propagatable_scalar(
place: PlaceIndex,
state: &State>,
- map: &Map,
+ map: &Map<'_>,
) -> Option {
if let FlatSet::Elem(value) = state.get_idx(place, map)
&& value.try_to_scalar_int().is_ok()
@@ -613,14 +615,14 @@ fn propagatable_scalar(
}
}
-#[instrument(level = "trace", skip(ecx, state, map))]
+#[instrument(level = "trace", skip(ecx, state, map), ret)]
fn try_write_constant<'tcx>(
ecx: &mut InterpCx<'tcx, DummyMachine>,
dest: &PlaceTy<'tcx>,
place: PlaceIndex,
ty: Ty<'tcx>,
state: &State>,
- map: &Map,
+ map: &Map<'tcx>,
) -> InterpResult<'tcx> {
let layout = ecx.layout_of(ty)?;
@@ -719,6 +721,7 @@ impl<'mir, 'tcx>
{
type FlowState = State>;
+ #[instrument(level = "trace", skip(self, results, statement))]
fn visit_statement_before_primary_effect(
&mut self,
results: &mut Results<'tcx, ValueAnalysisWrapper>>,
@@ -740,6 +743,7 @@ impl<'mir, 'tcx>
}
}
+ #[instrument(level = "trace", skip(self, results, statement))]
fn visit_statement_after_primary_effect(
&mut self,
results: &mut Results<'tcx, ValueAnalysisWrapper>>,
@@ -834,7 +838,7 @@ struct OperandCollector<'tcx, 'map, 'locals, 'a> {
state: &'a State>,
visitor: &'a mut Collector<'tcx, 'locals>,
ecx: &'map mut InterpCx<'tcx, DummyMachine>,
- map: &'map Map,
+ map: &'map Map<'tcx>,
}
impl<'tcx> Visitor<'tcx> for OperandCollector<'tcx, '_, '_, '_> {
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index 97ec0cb39ded..2100f4b4a1af 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -123,7 +123,7 @@ struct TOFinder<'tcx, 'a> {
param_env: ty::ParamEnv<'tcx>,
ecx: InterpCx<'tcx, DummyMachine>,
body: &'a Body<'tcx>,
- map: &'a Map,
+ map: &'a Map<'tcx>,
loop_headers: &'a BitSet,
/// We use an arena to avoid cloning the slices when cloning `state`.
arena: &'a DroplessArena,
diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs
index c90f8e761633..c23bc8f09ad1 100644
--- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs
@@ -16,9 +16,9 @@ use crate::delegate::SolverDelegate;
use crate::solve::inspect::{self, ProofTreeBuilder};
use crate::solve::search_graph::SearchGraph;
use crate::solve::{
- search_graph, CanonicalInput, CanonicalResponse, Certainty, Goal, GoalEvaluationKind,
- GoalSource, MaybeCause, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData,
- QueryResult, SolverMode, FIXPOINT_STEP_LIMIT,
+ CanonicalInput, CanonicalResponse, Certainty, Goal, GoalEvaluationKind, GoalSource, MaybeCause,
+ NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryResult, SolverMode,
+ FIXPOINT_STEP_LIMIT,
};
pub(super) mod canonical;
@@ -72,7 +72,7 @@ where
/// new placeholders to the caller.
pub(super) max_input_universe: ty::UniverseIndex,
- pub(super) search_graph: &'a mut SearchGraph,
+ pub(super) search_graph: &'a mut SearchGraph,
nested_goals: NestedGoals,
@@ -200,7 +200,7 @@ where
generate_proof_tree: GenerateProofTree,
f: impl FnOnce(&mut EvalCtxt<'_, D>) -> R,
) -> (R, Option>) {
- let mut search_graph = search_graph::SearchGraph::new(delegate.solver_mode());
+ let mut search_graph = SearchGraph::new(delegate.solver_mode());
let mut ecx = EvalCtxt {
delegate,
@@ -241,7 +241,7 @@ where
/// and registering opaques from the canonicalized input.
fn enter_canonical(
cx: I,
- search_graph: &'a mut search_graph::SearchGraph,
+ search_graph: &'a mut SearchGraph,
canonical_input: CanonicalInput,
canonical_goal_evaluation: &mut ProofTreeBuilder,
f: impl FnOnce(&mut EvalCtxt<'_, D>, Goal) -> R,
@@ -296,7 +296,7 @@ where
#[instrument(level = "debug", skip(cx, search_graph, goal_evaluation), ret)]
fn evaluate_canonical_goal(
cx: I,
- search_graph: &'a mut search_graph::SearchGraph,
+ search_graph: &'a mut SearchGraph,
canonical_input: CanonicalInput,
goal_evaluation: &mut ProofTreeBuilder,
) -> QueryResult {
diff --git a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs
index b50676e8d532..3e266ddac71f 100644
--- a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs
@@ -8,7 +8,7 @@ use std::marker::PhantomData;
use std::mem;
use rustc_type_ir::inherent::*;
-use rustc_type_ir::{self as ty, Interner};
+use rustc_type_ir::{self as ty, search_graph, Interner};
use crate::delegate::SolverDelegate;
use crate::solve::eval_ctxt::canonical;
@@ -38,7 +38,7 @@ use crate::solve::{
/// trees. At the end of trait solving `ProofTreeBuilder::finalize`
/// is called to recursively convert the whole structure to a
/// finished proof tree.
-pub(in crate::solve) struct ProofTreeBuilder::Interner>
+pub(crate) struct ProofTreeBuilder::Interner>
where
D: SolverDelegate,
I: Interner,
@@ -321,23 +321,6 @@ impl, I: Interner> ProofTreeBuilder