Compare commits

..

1 commit

Author SHA1 Message Date
github-actions
87187f107b cargo update
compiler & tools dependencies:     Locking 132 packages to latest compatible versions
    Updating addr2line v0.24.2 -> v0.25.1
    Updating annotate-snippets v0.12.10 -> v0.12.11
    Updating anstyle-svg v0.1.11 -> v0.1.12 (available: v1.0.0)
    Updating anyhow v1.0.100 -> v1.0.101
    Updating assert_cmd v2.1.1 -> v2.1.2
    Updating backtrace v0.3.75 -> v0.3.76
    Updating bitflags v2.10.0 -> v2.11.0
    Updating blake3 v1.8.2 -> v1.8.3
    Updating bumpalo v3.19.0 -> v3.19.1
    Updating camino v1.2.1 -> v1.2.2
    Updating cargo-platform v0.3.1 -> v0.3.2
    Updating chrono v0.4.42 -> v0.4.43
    Updating clap v4.5.54 -> v4.5.58
    Updating clap_builder v4.5.54 -> v4.5.58
    Updating clap_derive v4.5.49 -> v4.5.55
    Removing clap_lex v0.7.6
      Adding clap_lex v0.7.7
      Adding clap_lex v1.0.0
    Updating colored v3.0.0 -> v3.1.1
      Adding console v0.16.2
    Updating constant_time_eq v0.3.1 -> v0.4.2
    Updating curl-sys v0.4.84+curl-8.17.0 -> v0.4.85+curl-8.18.0
    Updating cxx v1.0.188 -> v1.0.194
    Updating cxx-build v1.0.188 -> v1.0.194
    Updating cxxbridge-cmd v1.0.188 -> v1.0.194
    Updating cxxbridge-flags v1.0.188 -> v1.0.194
    Updating cxxbridge-macro v1.0.188 -> v1.0.194
      Adding darling v0.21.3
      Adding darling_core v0.21.3
      Adding darling_macro v0.21.3
    Updating dbus v0.9.9 -> v0.9.10
    Updating derive_setters v0.1.8 -> v0.1.9
    Updating ena v0.14.3 -> v0.14.4
    Updating env_filter v0.1.4 -> v1.0.0
    Updating env_logger v0.11.8 -> v0.11.9
    Updating filetime v0.2.26 -> v0.2.27
    Updating find-msvc-tools v0.1.5 -> v0.1.9
    Updating flate2 v1.1.5 -> v1.1.9
    Removing getrandom v0.2.16
      Adding getrandom v0.2.17
      Adding getrandom v0.4.1
    Updating git2 v0.20.2 -> v0.20.4
    Updating iana-time-zone v0.1.64 -> v0.1.65
    Updating icu_locale_data v2.1.1 -> v2.1.2
    Updating icu_properties v2.1.1 -> v2.1.2
    Updating icu_properties_data v2.1.1 -> v2.1.2
    Updating id-arena v2.2.1 -> v2.3.0
      Adding indicatif v0.18.4
    Updating itoa v1.0.15 -> v1.0.17
    Updating jiff v0.2.16 -> v0.2.20
    Updating jiff-static v0.2.16 -> v0.2.20
    Updating js-sys v0.3.82 -> v0.3.85
    Updating libc v0.2.177 -> v0.2.182
    Updating libdbus-sys v0.2.6 -> v0.2.7
    Updating libgit2-sys v0.18.2+1.9.1 -> v0.18.3+1.9.2
    Updating libm v0.2.15 -> v0.2.16
    Updating libredox v0.1.10 -> v0.1.12
    Updating log v0.4.28 -> v0.4.29
    Updating memchr v2.7.6 -> v2.8.0
    Updating mio v1.1.0 -> v1.1.1
 Downgrading objc2-core-foundation v0.3.2 -> v0.3.1 (available: v0.3.2)
 Downgrading objc2-io-kit v0.3.2 -> v0.3.1 (available: v0.3.2)
    Updating pest v2.8.3 -> v2.8.6
    Updating pest_derive v2.8.3 -> v2.8.6
    Updating pest_generator v2.8.3 -> v2.8.6
    Updating pest_meta v2.8.3 -> v2.8.6
    Updating portable-atomic v1.11.1 -> v1.13.1
    Updating portable-atomic-util v0.2.4 -> v0.2.5
    Updating predicates v3.1.3 -> v3.1.4
    Updating predicates-core v1.0.9 -> v1.0.10
    Updating predicates-tree v1.0.12 -> v1.0.13
      Adding prettyplease v0.2.37
    Updating proc-macro2 v1.0.103 -> v1.0.106
    Updating quote v1.0.42 -> v1.0.44
    Updating rand_core v0.9.3 -> v0.9.5
      Adding redox_syscall v0.7.1
    Updating regex v1.12.2 -> v1.12.3
    Updating regex-automata v0.4.13 -> v0.4.14
    Updating regex-lite v0.1.8 -> v0.1.9
    Updating regex-syntax v0.8.8 -> v0.8.9
    Updating rustix v1.1.2 -> v1.1.3
    Removing ryu v1.0.20
    Updating schemars v1.1.0 -> v1.2.1
    Updating schemars_derive v1.1.0 -> v1.2.1
    Updating self_cell v1.2.1 -> v1.2.2
    Updating serde_json v1.0.145 -> v1.0.149
    Updating serde_spanned v1.0.3 -> v1.0.4
    Updating simd-adler32 v0.3.7 -> v0.3.8
    Updating siphasher v1.0.1 -> v1.0.2
    Updating socket2 v0.6.1 -> v0.6.2
    Updating syn v2.0.110 -> v2.0.115
    Updating sysinfo v0.38.0 -> v0.38.1
    Updating tempfile v3.23.0 -> v3.25.0
    Updating thiserror v2.0.17 -> v2.0.18
    Updating thiserror-impl v2.0.17 -> v2.0.18
    Updating toml v0.9.8 -> v0.9.12+spec-1.1.0 (available: v1.0.1+spec-1.1.0)
    Updating toml_datetime v0.7.3 -> v0.7.5+spec-1.1.0
    Updating toml_parser v1.0.4 -> v1.0.8+spec-1.1.0
    Updating toml_writer v1.0.4 -> v1.0.6+spec-1.1.0
    Updating tracing v0.1.41 -> v0.1.44
    Updating tracing-attributes v0.1.30 -> v0.1.31
    Updating tracing-core v0.1.34 -> v0.1.36
    Updating tracing-subscriber v0.3.20 -> v0.3.22
    Updating ui_test v0.30.3 -> v0.30.4
    Updating unicase v2.8.1 -> v2.9.0
    Updating unicode-ident v1.0.22 -> v1.0.23
    Updating unicode-script v0.5.7 -> v0.5.8
      Adding unit-prefix v0.5.2
    Updating url v2.5.7 -> v2.5.8
    Updating utf8-width v0.1.7 -> v0.1.8
    Updating uuid v1.18.1 -> v1.21.0
    Updating wasi-preview1-component-adapter-provider v40.0.0 -> v40.0.3
      Adding wasip2 v1.0.2+wasi-0.2.9
      Adding wasip3 v0.4.0+wasi-0.3.0-rc-2026-01-06
    Updating wasm-bindgen v0.2.105 -> v0.2.108
    Updating wasm-bindgen-macro v0.2.105 -> v0.2.108
    Updating wasm-bindgen-macro-support v0.2.105 -> v0.2.108
    Updating wasm-bindgen-shared v0.2.105 -> v0.2.108
      Adding wasm-encoder v0.244.0
      Adding wasm-encoder v0.245.1
      Adding wasm-metadata v0.244.0
      Adding wasmparser v0.244.0
      Adding wasmparser v0.245.1
    Updating wast v243.0.0 -> v245.0.1
    Updating wat v1.243.0 -> v1.245.1
    Updating winnow v0.7.13 -> v0.7.14
      Adding wit-bindgen v0.51.0
      Adding wit-bindgen-core v0.51.0
      Adding wit-bindgen-rust v0.51.0
      Adding wit-bindgen-rust-macro v0.51.0
      Adding wit-component v0.244.0
      Adding wit-parser v0.244.0
    Updating zerocopy v0.8.27 -> v0.8.39
    Updating zerocopy-derive v0.8.27 -> v0.8.39
      Adding zmij v1.0.21
note: pass `--verbose` to see 47 unchanged dependencies behind latest

library dependencies:     Locking 5 packages to latest compatible versions
    Updating dlmalloc v0.2.11 -> v0.2.12
    Updating libc v0.2.178 -> v0.2.182
    Updating memchr v2.7.6 -> v2.8.0
    Updating rand_core v0.9.3 -> v0.9.5
    Updating windows-sys v0.60.2 -> v0.61.2
    Removing windows-targets v0.53.5
    Removing windows_aarch64_gnullvm v0.53.1
    Removing windows_aarch64_msvc v0.53.1
    Removing windows_i686_gnu v0.53.1
    Removing windows_i686_gnullvm v0.53.1
    Removing windows_i686_msvc v0.53.1
    Removing windows_x86_64_gnu v0.53.1
    Removing windows_x86_64_gnullvm v0.53.1
    Removing windows_x86_64_msvc v0.53.1
note: pass `--verbose` to see 10 unchanged dependencies behind latest

rustbook dependencies:     Locking 74 packages to latest compatible versions
    Updating anstyle-query v1.1.4 -> v1.1.5
    Updating anstyle-wincon v3.0.10 -> v3.0.11
    Updating anyhow v1.0.100 -> v1.0.101
    Updating bitflags v2.10.0 -> v2.11.0
    Updating bumpalo v3.19.0 -> v3.19.1
    Updating cc v1.2.45 -> v1.2.56
    Updating chrono v0.4.42 -> v0.4.43
    Updating clap v4.5.51 -> v4.5.58
    Updating clap_builder v4.5.51 -> v4.5.58
    Updating clap_derive v4.5.49 -> v4.5.55
    Updating clap_lex v0.7.6 -> v1.0.0
    Updating crypto-common v0.1.6 -> v0.1.7
    Updating find-msvc-tools v0.1.4 -> v0.1.9
    Updating flate2 v1.1.5 -> v1.1.9
      Adding foldhash v0.1.5
 Downgrading generic-array v0.14.9 -> v0.14.7 (available: v0.14.9)
    Updating getrandom v0.3.4 -> v0.4.1
    Updating handlebars v6.3.2 -> v6.4.0
      Adding hashbrown v0.15.5
    Updating iana-time-zone v0.1.64 -> v0.1.65
      Adding id-arena v2.3.0
    Updating indexmap v2.12.1 -> v2.13.0
    Updating itoa v1.0.15 -> v1.0.17
    Updating js-sys v0.3.82 -> v0.3.85
      Adding leb128fmt v0.1.0
    Updating libc v0.2.177 -> v0.2.182
    Updating log v0.4.28 -> v0.4.29
    Updating memchr v2.7.6 -> v2.8.0
    Updating pest v2.8.3 -> v2.8.6
    Updating pest_derive v2.8.3 -> v2.8.6
    Updating pest_generator v2.8.3 -> v2.8.6
    Updating pest_meta v2.8.3 -> v2.8.6
      Adding prettyplease v0.2.37
    Updating proc-macro2 v1.0.103 -> v1.0.106
    Updating quote v1.0.42 -> v1.0.44
    Updating regex v1.12.2 -> v1.12.3
    Updating regex-automata v0.4.13 -> v0.4.14
    Updating regex-syntax v0.8.8 -> v0.8.9
    Updating rustix v1.1.2 -> v1.1.3
    Removing ryu v1.0.20
    Updating serde_json v1.0.145 -> v1.0.149
    Updating serde_spanned v1.0.3 -> v1.0.4
    Updating simd-adler32 v0.3.7 -> v0.3.8
    Updating siphasher v1.0.1 -> v1.0.2
    Updating syn v2.0.110 -> v2.0.115
    Updating tempfile v3.23.0 -> v3.25.0
    Updating thiserror v2.0.17 -> v2.0.18
    Updating thiserror-impl v2.0.17 -> v2.0.18
    Updating toml v0.9.8 -> v0.9.12+spec-1.1.0
    Updating toml_datetime v0.7.3 -> v0.7.5+spec-1.1.0
    Updating toml_parser v1.0.4 -> v1.0.8+spec-1.1.0
    Updating toml_writer v1.0.4 -> v1.0.6+spec-1.1.0
    Updating tracing v0.1.43 -> v0.1.44
    Updating tracing-core v0.1.35 -> v0.1.36
    Updating unicase v2.8.1 -> v2.9.0
    Updating unicode-ident v1.0.22 -> v1.0.23
      Adding unicode-xid v0.2.6
    Updating wasip2 v1.0.1+wasi-0.2.4 -> v1.0.2+wasi-0.2.9
      Adding wasip3 v0.4.0+wasi-0.3.0-rc-2026-01-06
    Updating wasm-bindgen v0.2.105 -> v0.2.108
    Updating wasm-bindgen-macro v0.2.105 -> v0.2.108
    Updating wasm-bindgen-macro-support v0.2.105 -> v0.2.108
    Updating wasm-bindgen-shared v0.2.105 -> v0.2.108
      Adding wasm-encoder v0.244.0
      Adding wasm-metadata v0.244.0
      Adding wasmparser v0.244.0
    Updating web_atoms v0.2.0 -> v0.2.3
    Removing windows-sys v0.60.2
    Removing windows-targets v0.53.5
    Removing windows_aarch64_gnullvm v0.53.1
    Removing windows_aarch64_msvc v0.53.1
    Removing windows_i686_gnu v0.53.1
    Removing windows_i686_gnullvm v0.53.1
    Removing windows_i686_msvc v0.53.1
    Removing windows_x86_64_gnu v0.53.1
    Removing windows_x86_64_gnullvm v0.53.1
    Removing windows_x86_64_msvc v0.53.1
    Updating winnow v0.7.13 -> v0.7.14
    Updating wit-bindgen v0.46.0 -> v0.51.0
      Adding wit-bindgen-core v0.51.0
      Adding wit-bindgen-rust v0.51.0
      Adding wit-bindgen-rust-macro v0.51.0
      Adding wit-component v0.244.0
      Adding wit-parser v0.244.0
      Adding zmij v1.0.21
2026-02-15 00:32:46 +00:00
1056 changed files with 11969 additions and 15330 deletions

File diff suppressed because it is too large Load diff

View file

@ -3131,16 +3131,8 @@ pub enum Const {
/// For details see the [RFC #2532](https://github.com/rust-lang/rfcs/pull/2532). /// For details see the [RFC #2532](https://github.com/rust-lang/rfcs/pull/2532).
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic, Walkable)] #[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic, Walkable)]
pub enum Defaultness { pub enum Defaultness {
/// Item is unmarked. Implicitly determined based off of position.
/// For impls, this is `final`; for traits, this is `default`.
///
/// If you're expanding an item in a built-in macro or parsing an item
/// by hand, you probably want to use this.
Implicit,
/// `default`
Default(Span), Default(Span),
/// `final`; per RFC 3678, only trait items may be *explicitly* marked final. Final,
Final(Span),
} }
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic, Walkable)] #[derive(Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic, Walkable)]
@ -4148,7 +4140,7 @@ impl AssocItemKind {
| Self::Fn(box Fn { defaultness, .. }) | Self::Fn(box Fn { defaultness, .. })
| Self::Type(box TyAlias { defaultness, .. }) => defaultness, | Self::Type(box TyAlias { defaultness, .. }) => defaultness,
Self::MacCall(..) | Self::Delegation(..) | Self::DelegationMac(..) => { Self::MacCall(..) | Self::Delegation(..) | Self::DelegationMac(..) => {
Defaultness::Implicit Defaultness::Final
} }
} }
} }

View file

@ -939,7 +939,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
); );
let trait_item_def_id = hir_id.expect_owner(); let trait_item_def_id = hir_id.expect_owner();
let (ident, generics, kind, has_value) = match &i.kind { let (ident, generics, kind, has_default) = match &i.kind {
AssocItemKind::Const(box ConstItem { AssocItemKind::Const(box ConstItem {
ident, ident,
generics, generics,
@ -1088,17 +1088,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
} }
}; };
let (defaultness, _) = self.lower_defaultness(i.kind.defaultness(), has_value, || {
hir::Defaultness::Default { has_value }
});
let item = hir::TraitItem { let item = hir::TraitItem {
owner_id: trait_item_def_id, owner_id: trait_item_def_id,
ident: self.lower_ident(ident), ident: self.lower_ident(ident),
generics, generics,
kind, kind,
span: self.lower_span(i.span), span: self.lower_span(i.span),
defaultness, defaultness: hir::Defaultness::Default { has_value: has_default },
has_delayed_lints: !self.delayed_lints.is_empty(), has_delayed_lints: !self.delayed_lints.is_empty(),
}; };
self.arena.alloc(item) self.arena.alloc(item)
@ -1126,8 +1122,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
// `defaultness.has_value()` is never called for an `impl`, always `true` in order // `defaultness.has_value()` is never called for an `impl`, always `true` in order
// to not cause an assertion failure inside the `lower_defaultness` function. // to not cause an assertion failure inside the `lower_defaultness` function.
let has_val = true; let has_val = true;
let (defaultness, defaultness_span) = let (defaultness, defaultness_span) = self.lower_defaultness(defaultness, has_val);
self.lower_defaultness(defaultness, has_val, || hir::Defaultness::Final);
let modifiers = TraitBoundModifiers { let modifiers = TraitBoundModifiers {
constness: BoundConstness::Never, constness: BoundConstness::Never,
asyncness: BoundAsyncness::Normal, asyncness: BoundAsyncness::Normal,
@ -1156,8 +1151,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
) -> &'hir hir::ImplItem<'hir> { ) -> &'hir hir::ImplItem<'hir> {
// Since `default impl` is not yet implemented, this is always true in impls. // Since `default impl` is not yet implemented, this is always true in impls.
let has_value = true; let has_value = true;
let (defaultness, _) = let (defaultness, _) = self.lower_defaultness(i.kind.defaultness(), has_value);
self.lower_defaultness(i.kind.defaultness(), has_value, || hir::Defaultness::Final);
let hir_id = hir::HirId::make_owner(self.current_hir_id_owner.def_id); let hir_id = hir::HirId::make_owner(self.current_hir_id_owner.def_id);
let attrs = self.lower_attrs( let attrs = self.lower_attrs(
hir_id, hir_id,
@ -1310,14 +1304,15 @@ impl<'hir> LoweringContext<'_, 'hir> {
&self, &self,
d: Defaultness, d: Defaultness,
has_value: bool, has_value: bool,
implicit: impl FnOnce() -> hir::Defaultness,
) -> (hir::Defaultness, Option<Span>) { ) -> (hir::Defaultness, Option<Span>) {
match d { match d {
Defaultness::Implicit => (implicit(), None),
Defaultness::Default(sp) => { Defaultness::Default(sp) => {
(hir::Defaultness::Default { has_value }, Some(self.lower_span(sp))) (hir::Defaultness::Default { has_value }, Some(self.lower_span(sp)))
} }
Defaultness::Final(sp) => (hir::Defaultness::Final, Some(self.lower_span(sp))), Defaultness::Final => {
assert!(has_value);
(hir::Defaultness::Final, None)
}
} }
} }

View file

@ -65,28 +65,6 @@ impl TraitOrImpl {
} }
} }
enum AllowDefault {
Yes,
No,
}
impl AllowDefault {
fn when(b: bool) -> Self {
if b { Self::Yes } else { Self::No }
}
}
enum AllowFinal {
Yes,
No,
}
impl AllowFinal {
fn when(b: bool) -> Self {
if b { Self::Yes } else { Self::No }
}
}
struct AstValidator<'a> { struct AstValidator<'a> {
sess: &'a Session, sess: &'a Session,
features: &'a Features, features: &'a Features,
@ -585,32 +563,10 @@ impl<'a> AstValidator<'a> {
} }
} }
fn check_defaultness( fn check_defaultness(&self, span: Span, defaultness: Defaultness) {
&self, if let Defaultness::Default(def_span) = defaultness {
span: Span, let span = self.sess.source_map().guess_head_span(span);
defaultness: Defaultness, self.dcx().emit_err(errors::ForbiddenDefault { span, def_span });
allow_default: AllowDefault,
allow_final: AllowFinal,
) {
match defaultness {
Defaultness::Default(def_span) if matches!(allow_default, AllowDefault::No) => {
let span = self.sess.source_map().guess_head_span(span);
self.dcx().emit_err(errors::ForbiddenDefault { span, def_span });
}
Defaultness::Final(def_span) if matches!(allow_final, AllowFinal::No) => {
let span = self.sess.source_map().guess_head_span(span);
self.dcx().emit_err(errors::ForbiddenFinal { span, def_span });
}
_ => (),
}
}
fn check_final_has_body(&self, item: &Item<AssocItemKind>, defaultness: Defaultness) {
if let AssocItemKind::Fn(box Fn { body: None, .. }) = &item.kind
&& let Defaultness::Final(def_span) = defaultness
{
let span = self.sess.source_map().guess_head_span(item.span);
self.dcx().emit_err(errors::ForbiddenFinalWithoutBody { span, def_span });
} }
} }
@ -742,11 +698,13 @@ impl<'a> AstValidator<'a> {
unreachable!("C variable argument list cannot be used in closures") unreachable!("C variable argument list cannot be used in closures")
}; };
if let Const::Yes(_) = sig.header.constness // C-variadics are not yet implemented in const evaluation.
&& !self.features.enabled(sym::const_c_variadic) if let Const::Yes(const_span) = sig.header.constness {
{ self.dcx().emit_err(errors::ConstAndCVariadic {
let msg = format!("c-variadic const function definitions are unstable"); spans: vec![const_span, variadic_param.span],
feature_err(&self.sess, sym::const_c_variadic, sig.span, msg).emit(); const_span,
variadic_span: variadic_param.span,
});
} }
if let Some(coroutine_kind) = sig.header.coroutine_kind { if let Some(coroutine_kind) = sig.header.coroutine_kind {
@ -1234,7 +1192,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}, },
) => { ) => {
self.visit_attrs_vis_ident(&item.attrs, &item.vis, ident); self.visit_attrs_vis_ident(&item.attrs, &item.vis, ident);
self.check_defaultness(item.span, *defaultness, AllowDefault::No, AllowFinal::No); self.check_defaultness(item.span, *defaultness);
for EiiImpl { eii_macro_path, .. } in eii_impls { for EiiImpl { eii_macro_path, .. } in eii_impls {
self.visit_path(eii_macro_path); self.visit_path(eii_macro_path);
@ -1404,7 +1362,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}); });
} }
ItemKind::Const(box ConstItem { defaultness, ident, rhs_kind, .. }) => { ItemKind::Const(box ConstItem { defaultness, ident, rhs_kind, .. }) => {
self.check_defaultness(item.span, *defaultness, AllowDefault::No, AllowFinal::No); self.check_defaultness(item.span, *defaultness);
if !rhs_kind.has_expr() { if !rhs_kind.has_expr() {
self.dcx().emit_err(errors::ConstWithoutBody { self.dcx().emit_err(errors::ConstWithoutBody {
span: item.span, span: item.span,
@ -1442,7 +1400,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
ItemKind::TyAlias( ItemKind::TyAlias(
ty_alias @ box TyAlias { defaultness, bounds, after_where_clause, ty, .. }, ty_alias @ box TyAlias { defaultness, bounds, after_where_clause, ty, .. },
) => { ) => {
self.check_defaultness(item.span, *defaultness, AllowDefault::No, AllowFinal::No); self.check_defaultness(item.span, *defaultness);
if ty.is_none() { if ty.is_none() {
self.dcx().emit_err(errors::TyAliasWithoutBody { self.dcx().emit_err(errors::TyAliasWithoutBody {
span: item.span, span: item.span,
@ -1472,7 +1430,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
fn visit_foreign_item(&mut self, fi: &'a ForeignItem) { fn visit_foreign_item(&mut self, fi: &'a ForeignItem) {
match &fi.kind { match &fi.kind {
ForeignItemKind::Fn(box Fn { defaultness, ident, sig, body, .. }) => { ForeignItemKind::Fn(box Fn { defaultness, ident, sig, body, .. }) => {
self.check_defaultness(fi.span, *defaultness, AllowDefault::No, AllowFinal::No); self.check_defaultness(fi.span, *defaultness);
self.check_foreign_fn_bodyless(*ident, body.as_deref()); self.check_foreign_fn_bodyless(*ident, body.as_deref());
self.check_foreign_fn_headerless(sig.header); self.check_foreign_fn_headerless(sig.header);
self.check_foreign_item_ascii_only(*ident); self.check_foreign_item_ascii_only(*ident);
@ -1492,7 +1450,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
ty, ty,
.. ..
}) => { }) => {
self.check_defaultness(fi.span, *defaultness, AllowDefault::No, AllowFinal::No); self.check_defaultness(fi.span, *defaultness);
self.check_foreign_kind_bodyless(*ident, "type", ty.as_ref().map(|b| b.span)); self.check_foreign_kind_bodyless(*ident, "type", ty.as_ref().map(|b| b.span));
self.check_type_no_bounds(bounds, "`extern` blocks"); self.check_type_no_bounds(bounds, "`extern` blocks");
self.check_foreign_ty_genericless(generics, after_where_clause); self.check_foreign_ty_genericless(generics, after_where_clause);
@ -1751,19 +1709,9 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
self.check_nomangle_item_asciionly(ident, item.span); self.check_nomangle_item_asciionly(ident, item.span);
} }
let defaultness = item.kind.defaultness(); if ctxt == AssocCtxt::Trait || self.outer_trait_or_trait_impl.is_none() {
self.check_defaultness( self.check_defaultness(item.span, item.kind.defaultness());
item.span, }
defaultness,
// `default` is allowed on all associated items in impls.
AllowDefault::when(matches!(ctxt, AssocCtxt::Impl { .. })),
// `final` is allowed on all associated *functions* in traits.
AllowFinal::when(
ctxt == AssocCtxt::Trait && matches!(item.kind, AssocItemKind::Fn(..)),
),
);
self.check_final_has_body(item, defaultness);
if let AssocCtxt::Impl { .. } = ctxt { if let AssocCtxt::Impl { .. } = ctxt {
match &item.kind { match &item.kind {

View file

@ -159,24 +159,6 @@ pub(crate) struct ForbiddenDefault {
pub def_span: Span, pub def_span: Span,
} }
#[derive(Diagnostic)]
#[diag("`final` is only allowed on associated functions in traits")]
pub(crate) struct ForbiddenFinal {
#[primary_span]
pub span: Span,
#[label("`final` because of this")]
pub def_span: Span,
}
#[derive(Diagnostic)]
#[diag("`final` is only allowed on associated functions if they have a body")]
pub(crate) struct ForbiddenFinalWithoutBody {
#[primary_span]
pub span: Span,
#[label("`final` because of this")]
pub def_span: Span,
}
#[derive(Diagnostic)] #[derive(Diagnostic)]
#[diag("associated constant in `impl` without body")] #[diag("associated constant in `impl` without body")]
pub(crate) struct AssocConstWithoutBody { pub(crate) struct AssocConstWithoutBody {
@ -841,6 +823,17 @@ pub(crate) struct ConstAndCoroutine {
pub coroutine_kind: &'static str, pub coroutine_kind: &'static str,
} }
#[derive(Diagnostic)]
#[diag("functions cannot be both `const` and C-variadic")]
pub(crate) struct ConstAndCVariadic {
#[primary_span]
pub spans: Vec<Span>,
#[label("`const` because of this")]
pub const_span: Span,
#[label("C-variadic because of this")]
pub variadic_span: Span,
}
#[derive(Diagnostic)] #[derive(Diagnostic)]
#[diag("functions cannot be both `{$coroutine_kind}` and C-variadic")] #[diag("functions cannot be both `{$coroutine_kind}` and C-variadic")]
pub(crate) struct CoroutineAndCVariadic { pub(crate) struct CoroutineAndCVariadic {

View file

@ -580,7 +580,6 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session, features: &Features) {
gate_all!(frontmatter, "frontmatters are experimental"); gate_all!(frontmatter, "frontmatters are experimental");
gate_all!(coroutines, "coroutine syntax is experimental"); gate_all!(coroutines, "coroutine syntax is experimental");
gate_all!(const_block_items, "const block items are experimental"); gate_all!(const_block_items, "const block items are experimental");
gate_all!(final_associated_functions, "`final` on trait functions is experimental");
if !visitor.features.never_patterns() { if !visitor.features.never_patterns() {
if let Some(spans) = spans.get(&sym::never_patterns) { if let Some(spans) = spans.get(&sym::never_patterns) {

View file

@ -51,7 +51,7 @@ impl<'a> State<'a> {
expr.as_deref(), expr.as_deref(),
vis, vis,
*safety, *safety,
ast::Defaultness::Implicit, ast::Defaultness::Final,
define_opaque.as_deref(), define_opaque.as_deref(),
), ),
ast::ForeignItemKind::TyAlias(box ast::TyAlias { ast::ForeignItemKind::TyAlias(box ast::TyAlias {
@ -201,7 +201,7 @@ impl<'a> State<'a> {
body.as_deref(), body.as_deref(),
&item.vis, &item.vis,
ast::Safety::Default, ast::Safety::Default,
ast::Defaultness::Implicit, ast::Defaultness::Final,
define_opaque.as_deref(), define_opaque.as_deref(),
); );
} }

View file

@ -292,12 +292,3 @@ impl<S: Stage> NoArgsAttributeParser<S> for RustcNoImplicitBoundsParser {
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Crate)]); const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Crate)]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcNoImplicitBounds; const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcNoImplicitBounds;
} }
pub(crate) struct DefaultLibAllocatorParser;
impl<S: Stage> NoArgsAttributeParser<S> for DefaultLibAllocatorParser {
const PATH: &[Symbol] = &[sym::default_lib_allocator];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Crate)]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::DefaultLibAllocator;
}

View file

@ -1178,30 +1178,6 @@ impl<S: Stage> SingleAttributeParser<S> for RustcDiagnosticItemParser {
} }
} }
pub(crate) struct RustcDoNotConstCheckParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcDoNotConstCheckParser {
const PATH: &[Symbol] = &[sym::rustc_do_not_const_check];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[
Allow(Target::Fn),
Allow(Target::Method(MethodKind::Inherent)),
Allow(Target::Method(MethodKind::TraitImpl)),
Allow(Target::Method(MethodKind::Trait { body: false })),
Allow(Target::Method(MethodKind::Trait { body: true })),
]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcDoNotConstCheck;
}
pub(crate) struct RustcNonnullOptimizationGuaranteedParser;
impl<S: Stage> NoArgsAttributeParser<S> for RustcNonnullOptimizationGuaranteedParser {
const PATH: &[Symbol] = &[sym::rustc_nonnull_optimization_guaranteed];
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Struct)]);
const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcNonnullOptimizationGuaranteed;
}
pub(crate) struct RustcSymbolName; pub(crate) struct RustcSymbolName;
impl<S: Stage> SingleAttributeParser<S> for RustcSymbolName { impl<S: Stage> SingleAttributeParser<S> for RustcSymbolName {

View file

@ -235,7 +235,6 @@ attribute_parsers!(
Single<WithoutArgs<ConstContinueParser>>, Single<WithoutArgs<ConstContinueParser>>,
Single<WithoutArgs<ConstStabilityIndirectParser>>, Single<WithoutArgs<ConstStabilityIndirectParser>>,
Single<WithoutArgs<CoroutineParser>>, Single<WithoutArgs<CoroutineParser>>,
Single<WithoutArgs<DefaultLibAllocatorParser>>,
Single<WithoutArgs<DenyExplicitImplParser>>, Single<WithoutArgs<DenyExplicitImplParser>>,
Single<WithoutArgs<DynIncompatibleTraitParser>>, Single<WithoutArgs<DynIncompatibleTraitParser>>,
Single<WithoutArgs<EiiForeignItemParser>>, Single<WithoutArgs<EiiForeignItemParser>>,
@ -275,7 +274,6 @@ attribute_parsers!(
Single<WithoutArgs<RustcConversionSuggestionParser>>, Single<WithoutArgs<RustcConversionSuggestionParser>>,
Single<WithoutArgs<RustcDeallocatorParser>>, Single<WithoutArgs<RustcDeallocatorParser>>,
Single<WithoutArgs<RustcDelayedBugFromInsideQueryParser>>, Single<WithoutArgs<RustcDelayedBugFromInsideQueryParser>>,
Single<WithoutArgs<RustcDoNotConstCheckParser>>,
Single<WithoutArgs<RustcDumpDefParentsParser>>, Single<WithoutArgs<RustcDumpDefParentsParser>>,
Single<WithoutArgs<RustcDumpItemBoundsParser>>, Single<WithoutArgs<RustcDumpItemBoundsParser>>,
Single<WithoutArgs<RustcDumpPredicatesParser>>, Single<WithoutArgs<RustcDumpPredicatesParser>>,
@ -297,7 +295,6 @@ attribute_parsers!(
Single<WithoutArgs<RustcNoImplicitBoundsParser>>, Single<WithoutArgs<RustcNoImplicitBoundsParser>>,
Single<WithoutArgs<RustcNoMirInlineParser>>, Single<WithoutArgs<RustcNoMirInlineParser>>,
Single<WithoutArgs<RustcNonConstTraitMethodParser>>, Single<WithoutArgs<RustcNonConstTraitMethodParser>>,
Single<WithoutArgs<RustcNonnullOptimizationGuaranteedParser>>,
Single<WithoutArgs<RustcNounwindParser>>, Single<WithoutArgs<RustcNounwindParser>>,
Single<WithoutArgs<RustcObjectLifetimeDefaultParser>>, Single<WithoutArgs<RustcObjectLifetimeDefaultParser>>,
Single<WithoutArgs<RustcOffloadKernelParser>>, Single<WithoutArgs<RustcOffloadKernelParser>>,

View file

@ -1544,7 +1544,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> {
Rvalue::Use(operand) Rvalue::Use(operand)
| Rvalue::Repeat(operand, _) | Rvalue::Repeat(operand, _)
| Rvalue::UnaryOp(_ /*un_op*/, operand) | Rvalue::UnaryOp(_ /*un_op*/, operand)
| Rvalue::Cast(_ /*cast_kind*/, operand, _ /*ty*/) => { | Rvalue::Cast(_ /*cast_kind*/, operand, _ /*ty*/)
| Rvalue::ShallowInitBox(operand, _ /*ty*/) => {
self.consume_operand(location, (operand, span), state) self.consume_operand(location, (operand, span), state)
} }

View file

@ -297,9 +297,8 @@ impl<'a, 'tcx> LoanInvalidationsGenerator<'a, 'tcx> {
Rvalue::Use(operand) Rvalue::Use(operand)
| Rvalue::Repeat(operand, _) | Rvalue::Repeat(operand, _)
| Rvalue::UnaryOp(_ /*un_op*/, operand) | Rvalue::UnaryOp(_ /*un_op*/, operand)
| Rvalue::Cast(_ /*cast_kind*/, operand, _ /*ty*/) => { | Rvalue::Cast(_ /*cast_kind*/, operand, _ /*ty*/)
self.consume_operand(location, operand) | Rvalue::ShallowInitBox(operand, _ /*ty*/) => self.consume_operand(location, operand),
}
&Rvalue::Discriminant(place) => { &Rvalue::Discriminant(place) => {
self.access_place( self.access_place(

View file

@ -1004,6 +1004,17 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} }
} }
Rvalue::ShallowInitBox(_operand, ty) => {
let trait_ref =
ty::TraitRef::new(tcx, tcx.require_lang_item(LangItem::Sized, span), [*ty]);
self.prove_trait_ref(
trait_ref,
location.to_locations(),
ConstraintCategory::SizedBound,
);
}
Rvalue::Cast(cast_kind, op, ty) => { Rvalue::Cast(cast_kind, op, ty) => {
match *cast_kind { match *cast_kind {
CastKind::PointerCoercion( CastKind::PointerCoercion(
@ -2220,6 +2231,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
| Rvalue::Ref(..) | Rvalue::Ref(..)
| Rvalue::RawPtr(..) | Rvalue::RawPtr(..)
| Rvalue::Cast(..) | Rvalue::Cast(..)
| Rvalue::ShallowInitBox(..)
| Rvalue::BinaryOp(..) | Rvalue::BinaryOp(..)
| Rvalue::CopyForDeref(..) | Rvalue::CopyForDeref(..)
| Rvalue::UnaryOp(..) | Rvalue::UnaryOp(..)

View file

@ -83,7 +83,7 @@ fn generate_handler(cx: &ExtCtxt<'_>, handler: Ident, span: Span, sig_span: Span
let body = Some(cx.block_expr(call)); let body = Some(cx.block_expr(call));
let kind = ItemKind::Fn(Box::new(Fn { let kind = ItemKind::Fn(Box::new(Fn {
defaultness: ast::Defaultness::Implicit, defaultness: ast::Defaultness::Final,
sig, sig,
ident: Ident::from_str_and_span(&global_fn_name(ALLOC_ERROR_HANDLER), span), ident: Ident::from_str_and_span(&global_fn_name(ALLOC_ERROR_HANDLER), span),
generics: Generics::default(), generics: Generics::default(),

View file

@ -334,7 +334,7 @@ mod llvm_enzyme {
// The first element of it is the name of the function to be generated // The first element of it is the name of the function to be generated
let d_fn = Box::new(ast::Fn { let d_fn = Box::new(ast::Fn {
defaultness: ast::Defaultness::Implicit, defaultness: ast::Defaultness::Final,
sig: d_sig, sig: d_sig,
ident: first_ident(&meta_item_vec[0]), ident: first_ident(&meta_item_vec[0]),
generics, generics,

View file

@ -136,7 +136,7 @@ pub(crate) fn expand_deriving_coerce_pointee(
of_trait: Some(Box::new(ast::TraitImplHeader { of_trait: Some(Box::new(ast::TraitImplHeader {
safety: ast::Safety::Default, safety: ast::Safety::Default,
polarity: ast::ImplPolarity::Positive, polarity: ast::ImplPolarity::Positive,
defaultness: ast::Defaultness::Implicit, defaultness: ast::Defaultness::Final,
trait_ref, trait_ref,
})), })),
constness: ast::Const::No, constness: ast::Const::No,
@ -159,7 +159,7 @@ pub(crate) fn expand_deriving_coerce_pointee(
of_trait: Some(Box::new(ast::TraitImplHeader { of_trait: Some(Box::new(ast::TraitImplHeader {
safety: ast::Safety::Default, safety: ast::Safety::Default,
polarity: ast::ImplPolarity::Positive, polarity: ast::ImplPolarity::Positive,
defaultness: ast::Defaultness::Implicit, defaultness: ast::Defaultness::Final,
trait_ref, trait_ref,
})), })),
constness: ast::Const::No, constness: ast::Const::No,

View file

@ -614,7 +614,7 @@ impl<'a> TraitDef<'a> {
}, },
attrs: ast::AttrVec::new(), attrs: ast::AttrVec::new(),
kind: ast::AssocItemKind::Type(Box::new(ast::TyAlias { kind: ast::AssocItemKind::Type(Box::new(ast::TyAlias {
defaultness: ast::Defaultness::Implicit, defaultness: ast::Defaultness::Final,
ident, ident,
generics: Generics::default(), generics: Generics::default(),
after_where_clause: ast::WhereClause::default(), after_where_clause: ast::WhereClause::default(),
@ -851,7 +851,7 @@ impl<'a> TraitDef<'a> {
of_trait: Some(Box::new(ast::TraitImplHeader { of_trait: Some(Box::new(ast::TraitImplHeader {
safety: self.safety, safety: self.safety,
polarity: ast::ImplPolarity::Positive, polarity: ast::ImplPolarity::Positive,
defaultness: ast::Defaultness::Implicit, defaultness: ast::Defaultness::Final,
trait_ref, trait_ref,
})), })),
constness: if self.is_const { ast::Const::Yes(DUMMY_SP) } else { ast::Const::No }, constness: if self.is_const { ast::Const::Yes(DUMMY_SP) } else { ast::Const::No },
@ -1073,7 +1073,7 @@ impl<'a> MethodDef<'a> {
let trait_lo_sp = span.shrink_to_lo(); let trait_lo_sp = span.shrink_to_lo();
let sig = ast::FnSig { header: ast::FnHeader::default(), decl: fn_decl, span }; let sig = ast::FnSig { header: ast::FnHeader::default(), decl: fn_decl, span };
let defaultness = ast::Defaultness::Implicit; let defaultness = ast::Defaultness::Final;
// Create the method. // Create the method.
Box::new(ast::AssocItem { Box::new(ast::AssocItem {

View file

@ -77,7 +77,7 @@ impl AllocFnFactory<'_, '_> {
let sig = FnSig { decl, header, span: self.span }; let sig = FnSig { decl, header, span: self.span };
let body = Some(self.cx.block_expr(result)); let body = Some(self.cx.block_expr(result));
let kind = ItemKind::Fn(Box::new(Fn { let kind = ItemKind::Fn(Box::new(Fn {
defaultness: ast::Defaultness::Implicit, defaultness: ast::Defaultness::Final,
sig, sig,
ident: Ident::from_str_and_span(&global_fn_name(method.name), self.span), ident: Ident::from_str_and_span(&global_fn_name(method.name), self.span),
generics: Generics::default(), generics: Generics::default(),
@ -128,15 +128,11 @@ impl AllocFnFactory<'_, '_> {
let usize = self.cx.path_ident(self.span, Ident::new(sym::usize, self.span)); let usize = self.cx.path_ident(self.span, Ident::new(sym::usize, self.span));
let ty_usize = self.cx.ty_path(usize); let ty_usize = self.cx.ty_path(usize);
args.push(self.cx.param(self.span, size, ty_usize)); args.push(self.cx.param(self.span, size, ty_usize.clone()));
let ty_align = self.ptr_alignment(); args.push(self.cx.param(self.span, align, ty_usize));
args.push(self.cx.param(self.span, align, ty_align));
let layout_new = self.cx.std_path(&[ let layout_new =
sym::alloc, self.cx.std_path(&[sym::alloc, sym::Layout, sym::from_size_align_unchecked]);
sym::Layout,
sym::from_size_alignment_unchecked,
]);
let layout_new = self.cx.expr_path(self.cx.path(self.span, layout_new)); let layout_new = self.cx.expr_path(self.cx.path(self.span, layout_new));
let size = self.cx.expr_ident(self.span, size); let size = self.cx.expr_ident(self.span, size);
let align = self.cx.expr_ident(self.span, align); let align = self.cx.expr_ident(self.span, align);
@ -179,12 +175,6 @@ impl AllocFnFactory<'_, '_> {
self.cx.ty_path(usize) self.cx.ty_path(usize)
} }
fn ptr_alignment(&self) -> Box<Ty> {
let path = self.cx.std_path(&[sym::ptr, sym::Alignment]);
let path = self.cx.path(self.span, path);
self.cx.ty_path(path)
}
fn ptr_u8(&self) -> Box<Ty> { fn ptr_u8(&self) -> Box<Ty> {
let u8 = self.cx.path_ident(self.span, Ident::new(sym::u8, self.span)); let u8 = self.cx.path_ident(self.span, Ident::new(sym::u8, self.span));
let ty_u8 = self.cx.ty_path(u8); let ty_u8 = self.cx.ty_path(u8);

View file

@ -283,7 +283,7 @@ pub(crate) fn expand_test_or_bench(
// const $ident: test::TestDescAndFn = // const $ident: test::TestDescAndFn =
ast::ItemKind::Const( ast::ItemKind::Const(
ast::ConstItem { ast::ConstItem {
defaultness: ast::Defaultness::Implicit, defaultness: ast::Defaultness::Final,
ident: Ident::new(fn_.ident.name, sp), ident: Ident::new(fn_.ident.name, sp),
generics: ast::Generics::default(), generics: ast::Generics::default(),
ty: cx.ty(sp, ast::TyKind::Path(None, test_path("TestDescAndFn"))), ty: cx.ty(sp, ast::TyKind::Path(None, test_path("TestDescAndFn"))),

View file

@ -330,7 +330,7 @@ fn mk_main(cx: &mut TestCtxt<'_>) -> Box<ast::Item> {
let decl = ecx.fn_decl(ThinVec::new(), ast::FnRetTy::Ty(main_ret_ty)); let decl = ecx.fn_decl(ThinVec::new(), ast::FnRetTy::Ty(main_ret_ty));
let sig = ast::FnSig { decl, header: ast::FnHeader::default(), span: sp }; let sig = ast::FnSig { decl, header: ast::FnHeader::default(), span: sp };
let defaultness = ast::Defaultness::Implicit; let defaultness = ast::Defaultness::Final;
// Honor the reexport_test_harness_main attribute // Honor the reexport_test_harness_main attribute
let main_ident = match cx.reexport_test_harness_main { let main_ident = match cx.reexport_test_harness_main {

View file

@ -622,6 +622,11 @@ impl<T: ?Sized> Deref for Box<T> {
} }
} }
#[lang = "exchange_malloc"]
unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
unsafe { libc::malloc(size) }
}
#[lang = "drop"] #[lang = "drop"]
pub trait Drop { pub trait Drop {
fn drop(&mut self); fn drop(&mut self);

View file

@ -1,133 +0,0 @@
From 285d5716fcfa6d43a3516d899b73bc85da322c25 Mon Sep 17 00:00:00 2001
From: xonx <119700621+xonx4l@users.noreply.github.com>
Date: Sun, 15 Feb 2026 14:06:49 +0000
Subject: [PATCH] Disable f16 math tests for cranelift
---
coretests/tests/floats/mod.rs | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/coretests/tests/floats/mod.rs b/coretests/tests/floats/mod.rs
index c61961f8584..d7b4fa20322 100644
--- a/coretests/tests/floats/mod.rs
+++ b/coretests/tests/floats/mod.rs
@@ -1534,7 +1534,7 @@ fn s_nan() -> Float {
name: powf,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1557,7 +1557,7 @@ fn s_nan() -> Float {
name: exp,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1578,7 +1578,7 @@ fn s_nan() -> Float {
name: exp2,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1598,7 +1598,7 @@ fn s_nan() -> Float {
name: ln,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1620,7 +1620,7 @@ fn s_nan() -> Float {
name: log,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1645,7 +1645,7 @@ fn s_nan() -> Float {
name: log2,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1668,7 +1668,7 @@ fn s_nan() -> Float {
name: log10,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1692,7 +1692,7 @@ fn s_nan() -> Float {
name: asinh,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1725,7 +1725,7 @@ fn s_nan() -> Float {
name: acosh,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1753,7 +1753,7 @@ fn s_nan() -> Float {
name: atanh,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1779,7 +1779,7 @@ fn s_nan() -> Float {
name: gamma,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -1814,7 +1814,7 @@ fn s_nan() -> Float {
name: ln_gamma,
attrs: {
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
@@ -2027,7 +2027,7 @@ fn s_nan() -> Float {
attrs: {
// FIXME(f16_f128): add math tests when available
const: #[cfg(false)],
- f16: #[cfg(all(not(miri), target_has_reliable_f16_math))],
+ f16: #[cfg(false)], // FIXME(rust-lang/rustc_codegen_cranelift#1622)
f128: #[cfg(all(not(miri), target_has_reliable_f128_math))],
},
test<Float> {
--
2.50.1

View file

@ -902,6 +902,7 @@ fn codegen_stmt<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, cur_block: Block, stmt:
lval.write_cvalue_transmute(fx, operand); lval.write_cvalue_transmute(fx, operand);
} }
Rvalue::CopyForDeref(_) => bug!("`CopyForDeref` in codegen"), Rvalue::CopyForDeref(_) => bug!("`CopyForDeref` in codegen"),
Rvalue::ShallowInitBox(..) => bug!("`ShallowInitBox` in codegen"),
} }
} }
StatementKind::StorageLive(_) StatementKind::StorageLive(_)

View file

@ -208,7 +208,7 @@ pub(crate) fn codegen_cast(
let ret_ty = if to_ty.bits() < 32 { types::I32 } else { to_ty }; let ret_ty = if to_ty.bits() < 32 { types::I32 } else { to_ty };
let name = format!( let name = format!(
"__fix{sign}tf{size}i", "__fix{sign}tf{size}i",
sign = if to_signed { "" } else { "uns" }, sign = if from_signed { "" } else { "un" },
size = match ret_ty { size = match ret_ty {
types::I32 => 's', types::I32 => 's',
types::I64 => 'd', types::I64 => 'd',

View file

@ -180,10 +180,6 @@ impl CodegenBackend for CraneliftCodegenBackend {
&& sess.target.env == Env::Gnu && sess.target.env == Env::Gnu
&& sess.target.abi != Abi::Llvm); && sess.target.abi != Abi::Llvm);
// FIXME(f128): f128 math operations need f128 math symbols, which currently aren't always
// filled in by compiler-builtins. The only libc that provides these currently is glibc.
let has_reliable_f128_math = has_reliable_f16_f128 && sess.target.env == Env::Gnu;
TargetConfig { TargetConfig {
target_features, target_features,
unstable_target_features, unstable_target_features,
@ -192,7 +188,7 @@ impl CodegenBackend for CraneliftCodegenBackend {
has_reliable_f16: has_reliable_f16_f128, has_reliable_f16: has_reliable_f16_f128,
has_reliable_f16_math: has_reliable_f16_f128, has_reliable_f16_math: has_reliable_f16_f128,
has_reliable_f128: has_reliable_f16_f128, has_reliable_f128: has_reliable_f16_f128,
has_reliable_f128_math, has_reliable_f128_math: has_reliable_f16_f128,
} }
} }

View file

@ -18,7 +18,6 @@
], ],
"ignorePaths": [ "ignorePaths": [
"src/intrinsic/archs.rs", "src/intrinsic/archs.rs",
"src/intrinsic/old_archs.rs",
"src/intrinsic/llvm.rs" "src/intrinsic/llvm.rs"
], ],
"ignoreRegExpList": [ "ignoreRegExpList": [

View file

@ -113,10 +113,6 @@ jobs:
git config --global user.name "User" git config --global user.name "User"
./y.sh prepare ./y.sh prepare
- name: Add more failing tests for GCC without 128-bit integers support
if: ${{ matrix.libgccjit_version.gcc == 'gcc-15-without-int128.deb' }}
run: cat tests/failing-ui-tests-without-128bit-integers.txt >> tests/failing-ui-tests.txt
- name: Run tests - name: Run tests
run: | run: |
./y.sh test --release --clean --build-sysroot ${{ matrix.commands }} ./y.sh test --release --clean --build-sysroot ${{ matrix.commands }}

View file

@ -83,7 +83,7 @@ jobs:
run: | run: |
./y.sh prepare --only-libcore --cross ./y.sh prepare --only-libcore --cross
./y.sh build --sysroot --target-triple m68k-unknown-linux-gnu --target ${{ github.workspace }}/target_specs/m68k-unknown-linux-gnu.json ./y.sh build --sysroot --target-triple m68k-unknown-linux-gnu --target ${{ github.workspace }}/target_specs/m68k-unknown-linux-gnu.json
CG_RUSTFLAGS="-Clinker=m68k-unknown-linux-gnu-gcc" ./y.sh cargo build -Zjson-target-spec --manifest-path=./tests/hello-world/Cargo.toml --target ${{ github.workspace }}/target_specs/m68k-unknown-linux-gnu.json CG_RUSTFLAGS="-Clinker=m68k-unknown-linux-gnu-gcc" ./y.sh cargo build --manifest-path=./tests/hello-world/Cargo.toml --target ${{ github.workspace }}/target_specs/m68k-unknown-linux-gnu.json
./y.sh clean all ./y.sh clean all
- name: Build - name: Build

View file

@ -56,18 +56,18 @@ dependencies = [
[[package]] [[package]]
name = "gccjit" name = "gccjit"
version = "3.3.0" version = "3.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26b73d18b642ce16378af78f89664841d7eeafa113682ff5d14573424eb0232a" checksum = "ff80f4d6d0749eab3a69122210b3a1fdd52edb6162781aadd7c4842e26983683"
dependencies = [ dependencies = [
"gccjit_sys", "gccjit_sys",
] ]
[[package]] [[package]]
name = "gccjit_sys" name = "gccjit_sys"
version = "1.3.0" version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee689456c013616942d5aef9a84d613cefcc3b335340d036f3650fc1a7459e15" checksum = "4f81d901767ddba371a619fa9bba657066a4d3c5607ee69bbb557c1c5ba9bf85"
dependencies = [ dependencies = [
"libc", "libc",
] ]

View file

@ -24,7 +24,7 @@ default = ["master"]
[dependencies] [dependencies]
object = { version = "0.37.0", default-features = false, features = ["std", "read"] } object = { version = "0.37.0", default-features = false, features = ["std", "read"] }
tempfile = "3.20" tempfile = "3.20"
gccjit = { version = "3.3.0", features = ["dlopen"] } gccjit = { version = "3.1.1", features = ["dlopen"] }
#gccjit = { git = "https://github.com/rust-lang/gccjit.rs", branch = "error-dlopen", features = ["dlopen"] } #gccjit = { git = "https://github.com/rust-lang/gccjit.rs", branch = "error-dlopen", features = ["dlopen"] }
# Local copy. # Local copy.

View file

@ -45,12 +45,12 @@ The default configuration (see below in the [Quick start](#quick-start) section)
./y.sh test --release ./y.sh test --release
``` ```
If you don't need to test GCC patches you wrote in our GCC fork, then the default configuration should If don't need to test GCC patches you wrote in our GCC fork, then the default configuration should
be all you need. You can update the `rustc_codegen_gcc` without worrying about GCC. be all you need. You can update the `rustc_codegen_gcc` without worrying about GCC.
### Building with your own GCC version ### Building with your own GCC version
If you wrote a patch for GCC and want to test it with this backend, you will need If you wrote a patch for GCC and want to test it without this backend, you will need
to do a few more things. to do a few more things.
To build it (most of these instructions come from [here](https://gcc.gnu.org/onlinedocs/jit/internals/index.html), so don't hesitate to take a look there if you encounter an issue): To build it (most of these instructions come from [here](https://gcc.gnu.org/onlinedocs/jit/internals/index.html), so don't hesitate to take a look there if you encounter an issue):
@ -127,7 +127,7 @@ You have to run these commands, in the corresponding order:
$ ./y.sh prepare $ ./y.sh prepare
$ ./y.sh build --sysroot $ ./y.sh build --sysroot
``` ```
To check if all is working correctly, run: To check if all is working correctly, run:
```bash ```bash
$ ./y.sh cargo build --manifest-path tests/hello-world/Cargo.toml $ ./y.sh cargo build --manifest-path tests/hello-world/Cargo.toml

View file

@ -6,4 +6,4 @@ seh = "seh"
typ = "typ" typ = "typ"
[files] [files]
extend-exclude = ["src/intrinsic/archs.rs", "src/intrinsic/old_archs.rs"] extend-exclude = ["src/intrinsic/archs.rs"]

View file

@ -141,10 +141,6 @@ pub fn build_sysroot(env: &HashMap<String, String>, config: &ConfigInfo) -> Resu
} }
let mut args: Vec<&dyn AsRef<OsStr>> = vec![&"cargo", &"build", &"--target", &config.target]; let mut args: Vec<&dyn AsRef<OsStr>> = vec![&"cargo", &"build", &"--target", &config.target];
if config.target.ends_with(".json") {
args.push(&"-Zjson-target-spec");
}
for feature in &config.features { for feature in &config.features {
args.push(&"--features"); args.push(&"--features");
args.push(feature); args.push(feature);

View file

@ -679,10 +679,10 @@ fn test_projects(env: &Env, args: &TestArg) -> Result<(), String> {
create_dir(projects_path)?; create_dir(projects_path)?;
let nb_parts = args.nb_parts.unwrap_or(0); let nb_parts = args.nb_parts.unwrap_or(0);
if let Some(count) = projects.len().checked_div(nb_parts) { if nb_parts > 0 {
// We increment the number of tests by one because if this is an odd number, we would skip // We increment the number of tests by one because if this is an odd number, we would skip
// one test. // one test.
let count = count + 1; let count = projects.len() / nb_parts + 1;
let current_part = args.current_part.unwrap(); let current_part = args.current_part.unwrap();
let start = current_part * count; let start = current_part * count;
// We remove the projects we don't want to test. // We remove the projects we don't want to test.

View file

@ -2,9 +2,9 @@
## How to debug GCC LTO ## How to debug GCC LTO
Run the command with `-v -save-temps` and then extract the `lto1` line from the output and run that under the debugger. Run do the command with `-v -save-temps` and then extract the `lto1` line from the output and run that under the debugger.
## How to debug stdarch tests that cannot be run locally ## How to debug stdarch tests that cannot be ran locally
First, run the tests normally: First, run the tests normally:

View file

@ -15,7 +15,7 @@ That can be caused by the fact that you try to compile with `lto = "fat"`, but y
### ld: cannot find crtbegin.o ### ld: cannot find crtbegin.o
When compiling an executable with libgccjit, if setting the `*LIBRARY_PATH` variables to the install directory, you will get the following errors: When compiling an executable with libgccijt, if setting the `*LIBRARY_PATH` variables to the install directory, you will get the following errors:
``` ```
ld: cannot find crtbegin.o: No such file or directory ld: cannot find crtbegin.o: No such file or directory

View file

@ -3,7 +3,7 @@
You can see the full documentation about what GIMPLE is [here](https://gcc.gnu.org/onlinedocs/gccint/GIMPLE.html). In this document we will explain how to generate it. You can see the full documentation about what GIMPLE is [here](https://gcc.gnu.org/onlinedocs/gccint/GIMPLE.html). In this document we will explain how to generate it.
First, we'll copy the content from `gcc/gcc/testsuite/jit.dg/test-const-attribute.c` into a First, we'll copy the content from `gcc/gcc/testsuite/jit.dg/test-const-attribute.c` into a
file named `local.c` and remove the content we're not interested in: file named `local.c` and remove the content we're not interested into:
```diff ```diff
- /* { dg-do compile { target x86_64-*-* } } */ - /* { dg-do compile { target x86_64-*-* } } */

View file

@ -53,7 +53,7 @@ If you wish to build a custom sysroot, pass the path of your sysroot source to `
### How to use [mem-trace](https://github.com/antoyo/mem-trace) ### How to use [mem-trace](https://github.com/antoyo/mem-trace)
`rustc` needs to be built without `jemalloc` so that `mem-trace` can overload `malloc` since `jemalloc` is linked statically, so a `LD_PRELOAD`-ed library won't have a chance to intercept the calls to `malloc`. `rustc` needs to be built without `jemalloc` so that `mem-trace` can overload `malloc` since `jemalloc` is linked statically, so a `LD_PRELOAD`-ed library won't a chance to intercept the calls to `malloc`.
### How to generate GIMPLE ### How to generate GIMPLE

View file

@ -628,6 +628,11 @@ impl<T: ?Sized, A: Allocator> Deref for Box<T, A> {
} }
} }
#[lang = "exchange_malloc"]
unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
libc::malloc(size)
}
#[lang = "drop"] #[lang = "drop"]
pub trait Drop { pub trait Drop {
fn drop(&mut self); fn drop(&mut self);

View file

@ -1 +1 @@
efdd0a7290c22f5438d7c5380105d353ee3e8518 0081ca6631abdfa02bf42bc85aaf507b8a0e6beb

View file

@ -1,3 +1,3 @@
[toolchain] [toolchain]
channel = "nightly-2026-02-14" channel = "nightly-2025-12-20"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"] components = ["rust-src", "rustc-dev", "llvm-tools-preview"]

View file

@ -575,7 +575,9 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
if dest.is_none() && options.contains(InlineAsmOptions::NORETURN) { if dest.is_none() && options.contains(InlineAsmOptions::NORETURN) {
let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable"); let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
self.llbb().add_eval(None, self.context.new_call(None, builtin_unreachable, &[])); let builtin_unreachable: RValue<'gcc> =
unsafe { std::mem::transmute(builtin_unreachable) };
self.call(self.type_void(), None, None, builtin_unreachable, &[], None, None);
} }
// Write results to outputs. // Write results to outputs.

View file

@ -17,13 +17,15 @@
// /usr/bin/ld: warning: type of symbol `_RNvNvNvNvNtNtNtCsAj5i4SGTR7_3std4sync4mpmc5waker17current_thread_id5DUMMY7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o // /usr/bin/ld: warning: type of symbol `_RNvNvNvNvNtNtNtCsAj5i4SGTR7_3std4sync4mpmc5waker17current_thread_id5DUMMY7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
// /usr/bin/ld: warning: incremental linking of LTO and non-LTO objects; using -flinker-output=nolto-rel which will bypass whole program optimization // /usr/bin/ld: warning: incremental linking of LTO and non-LTO objects; using -flinker-output=nolto-rel which will bypass whole program optimization
// cSpell:enable // cSpell:enable
use std::ffi::CString; use std::ffi::{CStr, CString};
use std::fs::{self, File}; use std::fs::{self, File};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::Ordering;
use gccjit::OutputKind; use gccjit::{Context, OutputKind};
use object::read::archive::ArchiveFile; use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::SerializedModule; use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter}; use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter};
use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file}; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
@ -31,12 +33,15 @@ use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_errors::{DiagCtxt, DiagCtxtHandle}; use rustc_errors::{DiagCtxt, DiagCtxtHandle};
use rustc_log::tracing::info; use rustc_log::tracing::info;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_session::config::Lto; use rustc_session::config::Lto;
use rustc_target::spec::RelocModel;
use tempfile::{TempDir, tempdir}; use tempfile::{TempDir, tempdir};
use crate::back::write::save_temp_bitcode; use crate::back::write::save_temp_bitcode;
use crate::errors::LtoBitcodeFromRlib; use crate::errors::LtoBitcodeFromRlib;
use crate::{GccCodegenBackend, GccContext, LtoMode, to_gcc_opt_level}; use crate::{GccCodegenBackend, GccContext, LTO_SUPPORTED, LtoMode, SyncContext, to_gcc_opt_level};
struct LtoData { struct LtoData {
// TODO(antoyo): use symbols_below_threshold. // TODO(antoyo): use symbols_below_threshold.
@ -276,3 +281,385 @@ impl ModuleBufferMethods for ModuleBuffer {
&[] &[]
} }
} }
/// Performs thin LTO by performing necessary global analysis and returning two
/// lists, one of the modules that need optimization and another for modules that
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
cgcx: &CodegenContext,
prof: &SelfProfilerRef,
dcx: DiagCtxtHandle<'_>,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> (Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>) {
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
if cgcx.use_linker_plugin_lto {
unreachable!(
"We should never reach this case if the LTO step \
is deferred to the linker"
);
}
thin_lto(
cgcx,
prof,
dcx,
modules,
lto_data.upstream_modules,
lto_data.tmp_path,
cached_modules,
//&lto_data.symbols_below_threshold,
)
}
pub(crate) fn prepare_thin(module: ModuleCodegen<GccContext>) -> (String, ThinBuffer) {
let name = module.name;
//let buffer = ThinBuffer::new(module.module_llvm.context, true);
let buffer = ThinBuffer::new(&module.module_llvm.context);
(name, buffer)
}
/// Prepare "thin" LTO to get run on these modules.
///
/// The general structure of ThinLTO is quite different from the structure of
/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
/// one giant LLVM module, and then we run more optimization passes over this
/// big module after internalizing most symbols. Thin LTO, on the other hand,
/// avoid this large bottleneck through more targeted optimization.
///
/// At a high level Thin LTO looks like:
///
/// 1. Prepare a "summary" of each LLVM module in question which describes
/// the values inside, cost of the values, etc.
/// 2. Merge the summaries of all modules in question into one "index"
/// 3. Perform some global analysis on this index
/// 4. For each module, use the index and analysis calculated previously to
/// perform local transformations on the module, for example inlining
/// small functions from other modules.
/// 5. Run thin-specific optimization passes over each module, and then code
/// generate everything at the end.
///
/// The summary for each module is intended to be quite cheap, and the global
/// index is relatively quite cheap to create as well. As a result, the goal of
/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
/// situations. For example one cheap optimization is that we can parallelize
/// all codegen modules, easily making use of all the cores on a machine.
///
/// With all that in mind, the function here is designed at specifically just
/// calculating the *index* for ThinLTO. This index will then be shared amongst
/// all of the `LtoModuleCodegen` units returned below and destroyed once
/// they all go out of scope.
fn thin_lto(
_cgcx: &CodegenContext,
prof: &SelfProfilerRef,
_dcx: DiagCtxtHandle<'_>,
modules: Vec<(String, ThinBuffer)>,
serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
tmp_path: TempDir,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
//_symbols_below_threshold: &[String],
) -> (Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>) {
let _timer = prof.generic_activity("LLVM_thin_lto_global_analysis");
info!("going for that thin, thin LTO");
/*let green_modules: FxHashMap<_, _> =
cached_modules.iter().map(|(_, wp)| (wp.cgu_name.clone(), wp.clone())).collect();*/
let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
let mut thin_buffers = Vec::with_capacity(modules.len());
let mut module_names = Vec::with_capacity(full_scope_len);
//let mut thin_modules = Vec::with_capacity(full_scope_len);
for (i, (name, buffer)) in modules.into_iter().enumerate() {
info!("local module: {} - {}", i, name);
let cname = CString::new(name.as_bytes()).unwrap();
/*thin_modules.push(llvm::ThinLTOModule {
identifier: cname.as_ptr(),
data: buffer.data().as_ptr(),
len: buffer.data().len(),
});*/
thin_buffers.push(buffer);
module_names.push(cname);
}
// FIXME: All upstream crates are deserialized internally in the
// function below to extract their summary and modules. Note that
// unlike the loop above we *must* decode and/or read something
// here as these are all just serialized files on disk. An
// improvement, however, to make here would be to store the
// module summary separately from the actual module itself. Right
// now this is store in one large bitcode file, and the entire
// file is deflate-compressed. We could try to bypass some of the
// decompression by storing the index uncompressed and only
// lazily decompressing the bytecode if necessary.
//
// Note that truly taking advantage of this optimization will
// likely be further down the road. We'd have to implement
// incremental ThinLTO first where we could actually avoid
// looking at upstream modules entirely sometimes (the contents,
// we must always unconditionally look at the index).
let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());
let cached_modules =
cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));
for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
info!("upstream or cached module {:?}", name);
/*thin_modules.push(llvm::ThinLTOModule {
identifier: name.as_ptr(),
data: module.data().as_ptr(),
len: module.data().len(),
});*/
match module {
SerializedModule::Local(_) => {
//let path = module_buffer.0.to_str().expect("path");
//let my_path = PathBuf::from(path);
//let exists = my_path.exists();
/*module.module_llvm.should_combine_object_files = true;
module
.module_llvm
.context
.add_driver_option(module_buffer.0.to_str().expect("path"));*/
}
SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
SerializedModule::FromUncompressedFile(_) => {
unimplemented!("from uncompressed file")
}
}
serialized.push(module);
module_names.push(name);
}
// Sanity check
//assert_eq!(thin_modules.len(), module_names.len());
// Delegate to the C++ bindings to create some data here. Once this is a
// tried-and-true interface we may wish to try to upstream some of this
// to LLVM itself, right now we reimplement a lot of what they do
// upstream...
/*let data = llvm::LLVMRustCreateThinLTOData(
thin_modules.as_ptr(),
thin_modules.len() as u32,
symbols_below_threshold.as_ptr(),
symbols_below_threshold.len() as u32,
)
.ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?;
*/
let data = ThinData; //(Arc::new(tmp_path))/*(data)*/;
info!("thin LTO data created");
/*let (key_map_path, prev_key_map, curr_key_map) =
if let Some(ref incr_comp_session_dir) = cgcx.incr_comp_session_dir {
let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
// If the previous file was deleted, or we get an IO error
// reading the file, then we'll just use `None` as the
// prev_key_map, which will force the code to be recompiled.
let prev =
if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
(Some(path), prev, curr)
}
else {
// If we don't compile incrementally, we don't need to load the
// import data from LLVM.
assert!(green_modules.is_empty());
let curr = ThinLTOKeysMap::default();
(None, None, curr)
};
info!("thin LTO cache key map loaded");
info!("prev_key_map: {:#?}", prev_key_map);
info!("curr_key_map: {:#?}", curr_key_map);*/
// Throw our data in an `Arc` as we'll be sharing it across threads. We
// also put all memory referenced by the C++ data (buffers, ids, etc)
// into the arc as well. After this we'll create a thin module
// codegen per module in this data.
let shared =
Arc::new(ThinShared { data, thin_buffers, serialized_modules: serialized, module_names });
let copy_jobs = vec![];
let mut opt_jobs = vec![];
info!("checking which modules can be-reused and which have to be re-optimized.");
for (module_index, module_name) in shared.module_names.iter().enumerate() {
let module_name = module_name_to_str(module_name);
/*if let (Some(prev_key_map), true) =
(prev_key_map.as_ref(), green_modules.contains_key(module_name))
{
assert!(cgcx.incr_comp_session_dir.is_some());
// If a module exists in both the current and the previous session,
// and has the same LTO cache key in both sessions, then we can re-use it
if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
let work_product = green_modules[module_name].clone();
copy_jobs.push(work_product);
info!(" - {}: re-used", module_name);
assert!(cgcx.incr_comp_session_dir.is_some());
continue;
}
}*/
info!(" - {}: re-compiled", module_name);
opt_jobs.push(ThinModule { shared: shared.clone(), idx: module_index });
}
// Save the current ThinLTO import information for the next compilation
// session, overwriting the previous serialized data (if any).
/*if let Some(path) = key_map_path {
if let Err(err) = curr_key_map.save_to_file(&path) {
return Err(write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err }));
}
}*/
// NOTE: save the temporary directory used by LTO so that it gets deleted after linking instead
// of now.
//module.module_llvm.temp_dir = Some(tmp_path);
// TODO: save the directory so that it gets deleted later.
std::mem::forget(tmp_path);
(opt_jobs, copy_jobs)
}
/// Produces the codegen output for a single module taking part in a ThinLTO
/// run.
///
/// Unlike the LLVM backend, the GCC backend does not run the ThinLTO analysis
/// itself here. It either reuses the GCC context kept alive by the module's
/// `ThinBuffer` (locally codegened modules), or — for serialized
/// upstream/cached modules — creates a fresh context and hands the object
/// file path to the GCC driver via `add_driver_option`, presumably so GCC's
/// own LTO machinery combines it at link time (TODO confirm). The large
/// commented-out sections are the LLVM reference implementation kept for
/// guidance.
pub fn optimize_thin_module(
    thin_module: ThinModule<GccCodegenBackend>,
    _cgcx: &CodegenContext,
) -> ModuleCodegen<GccContext> {
    //let module_name = &thin_module.shared.module_names[thin_module.idx];

    // Right now the implementation we've got only works over serialized
    // modules, so we create a fresh new LLVM context and parse the module
    // into that context. One day, however, we may do this for upstream
    // crates but for locally codegened modules we may be able to reuse
    // that LLVM Context and Module.
    //let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
    //let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &dcx)? as *const _;
    let mut lto_mode = LtoMode::None;
    // Index layout: `0..thin_buffers.len()` are locally codegened modules
    // carrying their own context; indices past that range refer to entries
    // of `serialized_modules`, hence the `idx - len` offset below.
    let context = match thin_module.shared.thin_buffers.get(thin_module.idx) {
        Some(thin_buffer) => Arc::clone(&thin_buffer.context),
        None => {
            let context = Context::default();
            let len = thin_module.shared.thin_buffers.len();
            let module = &thin_module.shared.serialized_modules[thin_module.idx - len];
            match *module {
                SerializedModule::Local(ref module_buffer) => {
                    let path = module_buffer.0.to_str().expect("path");
                    // Forward the serialized object file to the GCC driver so
                    // it participates in the link-time optimization step.
                    context.add_driver_option(path);
                    lto_mode = LtoMode::Thin;
                    /*module.module_llvm.should_combine_object_files = true;
                    module
                        .module_llvm
                        .context
                        .add_driver_option(module_buffer.0.to_str().expect("path"));*/
                }
                SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
                SerializedModule::FromUncompressedFile(_) => {
                    unimplemented!("from uncompressed file")
                }
            }
            Arc::new(SyncContext::new(context))
        }
    };
    // Snapshot of the global "is LTO supported" flag set during backend init.
    let lto_supported = LTO_SUPPORTED.load(Ordering::SeqCst);
    let module = ModuleCodegen::new_regular(
        thin_module.name().to_string(),
        GccContext {
            context,
            lto_mode,
            lto_supported,
            // TODO(antoyo): use the correct relocation model here.
            relocation_model: RelocModel::Pic,
            temp_dir: None,
        },
    );
    /*{
        let target = &*module.module_llvm.tm;
        let llmod = module.module_llvm.llmod();
        save_temp_bitcode(cgcx, &module, "thin-lto-input");

        // Up next comes the per-module local analyses that we do for Thin LTO.
        // Each of these functions is basically copied from the LLVM
        // implementation and then tailored to suit this implementation. Ideally
        // each of these would be supported by upstream LLVM but that's perhaps
        // a patch for another day!
        //
        // You can find some more comments about these functions in the LLVM
        // bindings we've got (currently `PassWrapper.cpp`)
        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
            unsafe { llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) };
            save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
        }

        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
        }

        // Alright now that we've done everything related to the ThinLTO
        // analysis it's time to run some optimizations! Here we use the same
        // `run_pass_manager` as the "fat" LTO above except that we tell it to
        // populate a thin-specific pass manager, which presumably LLVM treats a
        // little differently.
        {
            info!("running thin lto passes over {}", module.name);
            run_pass_manager(cgcx, &dcx, &mut module, true)?;
            save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
        }
    }*/

    // FIXME: switch to #[expect] when the clippy bug is fixed.
    #[allow(clippy::let_and_return)]
    module
}
/// Stand-in for LLVM's serialized ThinLTO module buffer. Instead of holding
/// serialized bytes, it keeps the module's GCC context alive (via a shared
/// `SyncContext`) so the module can be picked back up by
/// `optimize_thin_module`.
pub struct ThinBuffer {
    // Shared ownership: the same context is also reachable from the codegen
    // side, hence the `Arc`.
    context: Arc<SyncContext>,
}
impl ThinBuffer {
pub(crate) fn new(context: &Arc<SyncContext>) -> Self {
Self { context: Arc::clone(context) }
}
}
impl ThinBufferMethods for ThinBuffer {
    fn data(&self) -> &[u8] {
        // Always empty: this backend's thin "buffer" carries no serialized
        // byte representation (the state lives in the GCC context instead).
        &[]
    }
}
/// Placeholder for the per-crate ThinLTO analysis data (`WriteBackendMethods::ThinData`).
/// Currently carries no payload; the commented-out field suggests a temporary
/// directory (`Arc<TempDir>`) may be stored here in the future — TODO confirm.
pub struct ThinData; //(Arc<TempDir>);
fn module_name_to_str(c_str: &CStr) -> &str {
c_str.to_str().unwrap_or_else(|e| {
bug!("Encountered non-utf8 GCC module name `{}`: {}", c_str.to_string_lossy(), e)
})
}

View file

@ -1495,8 +1495,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
#[cfg(not(feature = "master"))] #[cfg(not(feature = "master"))]
fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> { fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
use crate::context::new_array_type;
let vector_type = vec let vector_type = vec
.get_type() .get_type()
.unqualified() .unqualified()
@ -1505,7 +1503,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let element_type = vector_type.get_element_type(); let element_type = vector_type.get_element_type();
let vec_num_units = vector_type.get_num_units(); let vec_num_units = vector_type.get_num_units();
let array_type = let array_type =
new_array_type(self.context, self.location, element_type, vec_num_units as u64); self.context.new_array_type(self.location, element_type, vec_num_units as u64);
let array = self.context.new_bitcast(self.location, vec, array_type).to_rvalue(); let array = self.context.new_bitcast(self.location, vec, array_type).to_rvalue();
self.context.new_array_access(self.location, array, idx).to_rvalue() self.context.new_array_access(self.location, array, idx).to_rvalue()
} }
@ -1873,31 +1871,32 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
// we're rounding towards zero, we just get float_ty::MAX (which is always an integer). // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
// This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
fn int_max(signed: bool, int_width: u64) -> u128 { let int_max = |signed: bool, int_width: u64| -> u128 {
let shift_amount = 128 - int_width; let shift_amount = 128 - int_width;
if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount } if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
} };
fn int_min(signed: bool, int_width: u64) -> i128 { let int_min = |signed: bool, int_width: u64| -> i128 {
if signed { i128::MIN >> (128 - int_width) } else { 0 } if signed { i128::MIN >> (128 - int_width) } else { 0 }
} };
// TODO: rewrite using a generic function with <F: Float>. let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
let compute_clamp_bounds_half = |signed: bool, int_width: u64| -> (u128, u128) {
let rounded_min = let rounded_min =
ieee::Half::from_i128_r(int_min(signed, int_width), Round::TowardZero); ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
//assert_eq!(rounded_min.status, Status::OK); assert_eq!(rounded_min.status, Status::OK);
let rounded_max = let rounded_max =
ieee::Half::from_u128_r(int_max(signed, int_width), Round::TowardZero); ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
assert!(rounded_max.value.is_finite()); assert!(rounded_max.value.is_finite());
(rounded_min.value.to_bits(), rounded_max.value.to_bits()) (rounded_min.value.to_bits(), rounded_max.value.to_bits())
}; };
fn compute_clamp_bounds<F: Float>(signed: bool, int_width: u64) -> (u128, u128) { let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
let rounded_min = F::from_i128_r(int_min(signed, int_width), Round::TowardZero); let rounded_min =
ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
assert_eq!(rounded_min.status, Status::OK); assert_eq!(rounded_min.status, Status::OK);
let rounded_max = F::from_u128_r(int_max(signed, int_width), Round::TowardZero); let rounded_max =
ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
assert!(rounded_max.value.is_finite()); assert!(rounded_max.value.is_finite());
(rounded_min.value.to_bits(), rounded_max.value.to_bits()) (rounded_min.value.to_bits(), rounded_max.value.to_bits())
} };
// To implement saturation, we perform the following steps: // To implement saturation, we perform the following steps:
// //
// 1. Cast val to an integer with fpto[su]i. This may result in undef. // 1. Cast val to an integer with fpto[su]i. This may result in undef.
@ -1927,19 +1926,15 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let float_bits_to_llval = |bx: &mut Self, bits| { let float_bits_to_llval = |bx: &mut Self, bits| {
let bits_llval = match float_width { let bits_llval = match float_width {
16 => bx.cx().const_u16(bits as u16),
32 => bx.cx().const_u32(bits as u32), 32 => bx.cx().const_u32(bits as u32),
64 => bx.cx().const_u64(bits as u64), 64 => bx.cx().const_u64(bits as u64),
128 => bx.cx().const_u128(bits),
n => bug!("unsupported float width {}", n), n => bug!("unsupported float width {}", n),
}; };
bx.bitcast(bits_llval, float_ty) bx.bitcast(bits_llval, float_ty)
}; };
let (f_min, f_max) = match float_width { let (f_min, f_max) = match float_width {
16 => compute_clamp_bounds_half(signed, int_width), 32 => compute_clamp_bounds_single(signed, int_width),
32 => compute_clamp_bounds::<ieee::Single>(signed, int_width), 64 => compute_clamp_bounds_double(signed, int_width),
64 => compute_clamp_bounds::<ieee::Double>(signed, int_width),
128 => compute_clamp_bounds::<ieee::Quad>(signed, int_width),
n => bug!("unsupported float width {}", n), n => bug!("unsupported float width {}", n),
}; };
let f_min = float_bits_to_llval(self, f_min); let f_min = float_bits_to_llval(self, f_min);

View file

@ -8,7 +8,7 @@ use rustc_middle::mir::Mutability;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar}; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar};
use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::layout::LayoutOf;
use crate::context::{CodegenCx, new_array_type}; use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt; use crate::type_of::LayoutGccExt;
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
@ -20,10 +20,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
bytes_in_context(self, bytes) bytes_in_context(self, bytes)
} }
pub fn const_u16(&self, i: u16) -> RValue<'gcc> {
self.const_uint(self.type_u16(), i as u64)
}
fn global_string(&self, string: &str) -> LValue<'gcc> { fn global_string(&self, string: &str) -> LValue<'gcc> {
// TODO(antoyo): handle non-null-terminated strings. // TODO(antoyo): handle non-null-terminated strings.
let string = self.context.new_string_literal(string); let string = self.context.new_string_literal(string);
@ -59,7 +55,7 @@ pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) ->
0 => { 0 => {
let context = &cx.context; let context = &cx.context;
let byte_type = context.new_type::<u64>(); let byte_type = context.new_type::<u64>();
let typ = new_array_type(context, None, byte_type, bytes.len() as u64 / 8); let typ = context.new_array_type(None, byte_type, bytes.len() as u64 / 8);
let elements: Vec<_> = bytes let elements: Vec<_> = bytes
.chunks_exact(8) .chunks_exact(8)
.map(|arr| { .map(|arr| {
@ -80,7 +76,7 @@ pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) ->
4 => { 4 => {
let context = &cx.context; let context = &cx.context;
let byte_type = context.new_type::<u32>(); let byte_type = context.new_type::<u32>();
let typ = new_array_type(context, None, byte_type, bytes.len() as u64 / 4); let typ = context.new_array_type(None, byte_type, bytes.len() as u64 / 4);
let elements: Vec<_> = bytes let elements: Vec<_> = bytes
.chunks_exact(4) .chunks_exact(4)
.map(|arr| { .map(|arr| {
@ -99,7 +95,7 @@ pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) ->
_ => { _ => {
let context = cx.context; let context = cx.context;
let byte_type = context.new_type::<u8>(); let byte_type = context.new_type::<u8>();
let typ = new_array_type(context, None, byte_type, bytes.len() as u64); let typ = context.new_array_type(None, byte_type, bytes.len() as u64);
let elements: Vec<_> = bytes let elements: Vec<_> = bytes
.iter() .iter()
.map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)) .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32))

View file

@ -19,8 +19,6 @@ use rustc_middle::ty::layout::{
}; };
use rustc_middle::ty::{self, ExistentialTraitRef, Instance, Ty, TyCtxt}; use rustc_middle::ty::{self, ExistentialTraitRef, Instance, Ty, TyCtxt};
use rustc_session::Session; use rustc_session::Session;
#[cfg(feature = "master")]
use rustc_session::config::DebugInfo;
use rustc_span::source_map::respan; use rustc_span::source_map::respan;
use rustc_span::{DUMMY_SP, Span}; use rustc_span::{DUMMY_SP, Span};
use rustc_target::spec::{HasTargetSpec, HasX86AbiOpt, Target, TlsModel, X86Abi}; use rustc_target::spec::{HasTargetSpec, HasX86AbiOpt, Target, TlsModel, X86Abi};
@ -147,11 +145,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
supports_f64_type: bool, supports_f64_type: bool,
supports_f128_type: bool, supports_f128_type: bool,
) -> Self { ) -> Self {
#[cfg(feature = "master")]
if tcx.sess.opts.debuginfo != DebugInfo::None {
context.set_filename(codegen_unit.name().as_str());
}
let create_type = |ctype, rust_type| { let create_type = |ctype, rust_type| {
let layout = tcx let layout = tcx
.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(rust_type)) .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(rust_type))
@ -201,8 +194,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
// TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in // TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in
// gcc_jit_context_new_array_constructor (it should not use reinterpret_cast). // gcc_jit_context_new_array_constructor (it should not use reinterpret_cast).
let i128_type = new_array_type(context, None, i64_type, 2)/*.get_aligned(i128_align)*/; let i128_type = context.new_array_type(None, i64_type, 2)/*.get_aligned(i128_align)*/;
let u128_type = new_array_type(context, None, u64_type, 2)/*.get_aligned(u128_align)*/; let u128_type = context.new_array_type(None, u64_type, 2)/*.get_aligned(u128_align)*/;
(i128_type, u128_type) (i128_type, u128_type)
}; };
@ -608,17 +601,3 @@ fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
TlsModel::Emulated => gccjit::TlsModel::GlobalDynamic, TlsModel::Emulated => gccjit::TlsModel::GlobalDynamic,
} }
} }
pub fn new_array_type<'gcc>(
context: &'gcc Context<'gcc>,
location: Option<Location<'gcc>>,
typ: Type<'gcc>,
size: u64,
) -> Type<'gcc> {
#[cfg(feature = "master")]
{
context.new_array_type_u64(location, typ, size)
}
#[cfg(not(feature = "master"))]
context.new_array_type(location, typ, size)
}

View file

@ -942,7 +942,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
fn float_to_int_cast( fn float_to_int_cast(
&self, &self,
signed: bool, signed: bool,
mut value: RValue<'gcc>, value: RValue<'gcc>,
dest_typ: Type<'gcc>, dest_typ: Type<'gcc>,
) -> RValue<'gcc> { ) -> RValue<'gcc> {
let value_type = value.get_type(); let value_type = value.get_type();
@ -951,22 +951,16 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
} }
debug_assert!(dest_typ.dyncast_array().is_some()); debug_assert!(dest_typ.dyncast_array().is_some());
let (dest_type, param_type) = match self.type_kind(value_type) {
TypeKind::Half => (Some(self.float_type), self.float_type),
_ => (None, value_type),
};
let name_suffix = match self.type_kind(value_type) { let name_suffix = match self.type_kind(value_type) {
// cSpell:disable // cSpell:disable
// Since we will cast Half to a float, we use sfti for both. TypeKind::Float => "sfti",
TypeKind::Half | TypeKind::Float => "sfti",
TypeKind::Double => "dfti", TypeKind::Double => "dfti",
TypeKind::FP128 => "tfti",
// cSpell:enable // cSpell:enable
kind => panic!("cannot cast a {:?} to non-native integer", kind), kind => panic!("cannot cast a {:?} to non-native integer", kind),
}; };
let sign = if signed { "" } else { "uns" }; let sign = if signed { "" } else { "uns" };
let func_name = format!("__fix{}{}", sign, name_suffix); let func_name = format!("__fix{}{}", sign, name_suffix);
let param = self.context.new_parameter(None, param_type, "n"); let param = self.context.new_parameter(None, value_type, "n");
let func = self.context.new_function( let func = self.context.new_function(
None, None,
FunctionType::Extern, FunctionType::Extern,
@ -975,9 +969,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
func_name, func_name,
false, false,
); );
if let Some(dest_type) = dest_type {
value = self.context.new_cast(None, value, dest_type);
}
self.context.new_call(None, func, &[value]) self.context.new_call(None, func, &[value])
} }

File diff suppressed because it is too large Load diff

View file

@ -4,7 +4,7 @@ use gccjit::{CType, Context, Field, Function, FunctionPtrType, RValue, ToRValue,
use rustc_codegen_ssa::traits::BuilderMethods; use rustc_codegen_ssa::traits::BuilderMethods;
use crate::builder::Builder; use crate::builder::Builder;
use crate::context::{CodegenCx, new_array_type}; use crate::context::CodegenCx;
fn encode_key_128_type<'a, 'gcc, 'tcx>( fn encode_key_128_type<'a, 'gcc, 'tcx>(
builder: &Builder<'a, 'gcc, 'tcx>, builder: &Builder<'a, 'gcc, 'tcx>,
@ -585,7 +585,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
"__builtin_ia32_encodekey128_u32" => { "__builtin_ia32_encodekey128_u32" => {
let mut new_args = args.to_vec(); let mut new_args = args.to_vec();
let m128i = builder.context.new_vector_type(builder.i64_type, 2); let m128i = builder.context.new_vector_type(builder.i64_type, 2);
let array_type = new_array_type(builder.context, None, m128i, 6); let array_type = builder.context.new_array_type(None, m128i, 6);
let result = builder.current_func().new_local(None, array_type, "result"); let result = builder.current_func().new_local(None, array_type, "result");
new_args.push(result.get_address(None)); new_args.push(result.get_address(None));
args = new_args.into(); args = new_args.into();
@ -593,7 +593,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
"__builtin_ia32_encodekey256_u32" => { "__builtin_ia32_encodekey256_u32" => {
let mut new_args = args.to_vec(); let mut new_args = args.to_vec();
let m128i = builder.context.new_vector_type(builder.i64_type, 2); let m128i = builder.context.new_vector_type(builder.i64_type, 2);
let array_type = new_array_type(builder.context, None, m128i, 7); let array_type = builder.context.new_array_type(None, m128i, 7);
let result = builder.current_func().new_local(None, array_type, "result"); let result = builder.current_func().new_local(None, array_type, "result");
new_args.push(result.get_address(None)); new_args.push(result.get_address(None));
args = new_args.into(); args = new_args.into();
@ -620,7 +620,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
let first_value = old_args.swap_remove(0); let first_value = old_args.swap_remove(0);
let element_type = first_value.get_type(); let element_type = first_value.get_type();
let array_type = new_array_type(builder.context, None, element_type, 8); let array_type = builder.context.new_array_type(None, element_type, 8);
let result = builder.current_func().new_local(None, array_type, "result"); let result = builder.current_func().new_local(None, array_type, "result");
new_args.push(result.get_address(None)); new_args.push(result.get_address(None));
@ -869,7 +869,7 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(
builder.llbb().add_assignment(None, field1, return_value); builder.llbb().add_assignment(None, field1, return_value);
let field2 = result.access_field(None, field2); let field2 = result.access_field(None, field2);
let field2_type = field2.to_rvalue().get_type(); let field2_type = field2.to_rvalue().get_type();
let array_type = new_array_type(builder.context, None, field2_type, 6); let array_type = builder.context.new_array_type(None, field2_type, 6);
let ptr = builder.context.new_cast(None, args[2], array_type.make_pointer()); let ptr = builder.context.new_cast(None, args[2], array_type.make_pointer());
let field2_ptr = let field2_ptr =
builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer()); builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
@ -891,7 +891,7 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(
builder.llbb().add_assignment(None, field1, return_value); builder.llbb().add_assignment(None, field1, return_value);
let field2 = result.access_field(None, field2); let field2 = result.access_field(None, field2);
let field2_type = field2.to_rvalue().get_type(); let field2_type = field2.to_rvalue().get_type();
let array_type = new_array_type(builder.context, None, field2_type, 7); let array_type = builder.context.new_array_type(None, field2_type, 7);
let ptr = builder.context.new_cast(None, args[3], array_type.make_pointer()); let ptr = builder.context.new_cast(None, args[3], array_type.make_pointer());
let field2_ptr = let field2_ptr =
builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer()); builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
@ -937,7 +937,7 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(
builder.llbb().add_assignment(None, field1, return_value); builder.llbb().add_assignment(None, field1, return_value);
let field2 = result.access_field(None, field2); let field2 = result.access_field(None, field2);
let field2_type = field2.to_rvalue().get_type(); let field2_type = field2.to_rvalue().get_type();
let array_type = new_array_type(builder.context, None, field2_type, 8); let array_type = builder.context.new_array_type(None, field2_type, 8);
let ptr = builder.context.new_cast(None, args[0], array_type.make_pointer()); let ptr = builder.context.new_cast(None, args[0], array_type.make_pointer());
let field2_ptr = let field2_ptr =
builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer()); builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
@ -1061,18 +1061,7 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
"llvm.x86.xgetbv" => "__builtin_ia32_xgetbv", "llvm.x86.xgetbv" => "__builtin_ia32_xgetbv",
// NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
// FIXME: Should handle other targets than `ia32`.
"llvm.sqrt.v2f64" => "__builtin_ia32_sqrtpd", "llvm.sqrt.v2f64" => "__builtin_ia32_sqrtpd",
// FIXME: Should handle other targets than `ia32`.
"llvm.sqrt.v4f32" => "__builtin_ia32_sqrtps",
"llvm.sqrt.f32" => {
let gcc_name = "__builtin_sqrtf";
let func = cx.context.get_builtin_function(gcc_name);
cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
return func;
}
// FIXME: Should handle other targets than `ia32`.
"llvm.smax.v4i32" => "__builtin_ia32_pmaxsd128",
"llvm.x86.avx512.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask", "llvm.x86.avx512.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask",
"llvm.x86.avx512.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask", "llvm.x86.avx512.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask",
"llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512_mask", "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512_mask",
@ -1615,7 +1604,5 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
func func
} }
#[cfg(feature = "master")]
include!("old_archs.rs");
#[cfg(feature = "master")] #[cfg(feature = "master")]
include!("archs.rs"); include!("archs.rs");

View file

@ -208,7 +208,6 @@ fn get_simple_function_f128<'gcc, 'tcx>(
let f128_type = cx.type_f128(); let f128_type = cx.type_f128();
let func_name = match name { let func_name = match name {
sym::ceilf128 => "ceilf128", sym::ceilf128 => "ceilf128",
sym::fabsf128 => "fabsf128",
sym::floorf128 => "floorf128", sym::floorf128 => "floorf128",
sym::truncf128 => "truncf128", sym::truncf128 => "truncf128",
sym::roundf128 => "roundf128", sym::roundf128 => "roundf128",
@ -263,7 +262,6 @@ fn f16_builtin<'gcc, 'tcx>(
let builtin_name = match name { let builtin_name = match name {
sym::ceilf16 => "__builtin_ceilf", sym::ceilf16 => "__builtin_ceilf",
sym::copysignf16 => "__builtin_copysignf", sym::copysignf16 => "__builtin_copysignf",
sym::fabsf16 => "fabsf",
sym::floorf16 => "__builtin_floorf", sym::floorf16 => "__builtin_floorf",
sym::fmaf16 => "fmaf", sym::fmaf16 => "fmaf",
sym::maxnumf16 => "__builtin_fmaxf", sym::maxnumf16 => "__builtin_fmaxf",
@ -330,7 +328,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
} }
sym::ceilf16 sym::ceilf16
| sym::copysignf16 | sym::copysignf16
| sym::fabsf16
| sym::floorf16 | sym::floorf16
| sym::fmaf16 | sym::fmaf16
| sym::maxnumf16 | sym::maxnumf16
@ -651,15 +648,15 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
let fn_ptr = func.get_address(None); let fn_ptr = func.get_address(None);
let fn_ty = fn_ptr.get_type(); let fn_ty = fn_ptr.get_type();
let mut call_args = vec![]; let mut llargs = vec![];
for arg in args { for arg in args {
match arg.val { match arg.val {
OperandValue::ZeroSized => {} OperandValue::ZeroSized => {}
OperandValue::Immediate(_) => call_args.push(arg.immediate()), OperandValue::Immediate(_) => llargs.push(arg.immediate()),
OperandValue::Pair(a, b) => { OperandValue::Pair(a, b) => {
call_args.push(a); llargs.push(a);
call_args.push(b); llargs.push(b);
} }
OperandValue::Ref(op_place_val) => { OperandValue::Ref(op_place_val) => {
let mut llval = op_place_val.llval; let mut llval = op_place_val.llval;
@ -676,13 +673,13 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
// We store bools as `i8` so we need to truncate to `i1`. // We store bools as `i8` so we need to truncate to `i1`.
llval = self.to_immediate_scalar(llval, scalar); llval = self.to_immediate_scalar(llval, scalar);
} }
call_args.push(llval); llargs.push(llval);
} }
} }
} }
// FIXME directly use the llvm intrinsic adjustment functions here // FIXME directly use the llvm intrinsic adjustment functions here
let llret = self.call(fn_ty, None, None, fn_ptr, &call_args, None, None); let llret = self.call(fn_ty, None, None, fn_ptr, &llargs, None, None);
if is_cleanup { if is_cleanup {
self.apply_attrs_to_cleanup_callsite(llret); self.apply_attrs_to_cleanup_callsite(llret);
} }
@ -723,8 +720,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
} }
fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> { fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): implement. unimplemented!();
self.context.new_rvalue_from_int(self.int_type, 0)
} }
} }

File diff suppressed because it is too large Load diff

View file

@ -76,6 +76,7 @@ use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use back::lto::{ThinBuffer, ThinData};
use gccjit::{CType, Context, OptimizationLevel}; use gccjit::{CType, Context, OptimizationLevel};
#[cfg(feature = "master")] #[cfg(feature = "master")]
use gccjit::{TargetInfo, Version}; use gccjit::{TargetInfo, Version};
@ -86,9 +87,7 @@ use rustc_codegen_ssa::back::write::{
}; };
use rustc_codegen_ssa::base::codegen_crate; use rustc_codegen_ssa::base::codegen_crate;
use rustc_codegen_ssa::target_features::cfg_target_feature; use rustc_codegen_ssa::target_features::cfg_target_feature;
use rustc_codegen_ssa::traits::{ use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, WriteBackendMethods};
CodegenBackend, ExtraBackendMethods, ThinBufferMethods, WriteBackendMethods,
};
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig}; use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig};
use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::profiling::SelfProfilerRef;
@ -178,6 +177,8 @@ pub struct GccCodegenBackend {
lto_supported: Arc<AtomicBool>, lto_supported: Arc<AtomicBool>,
} }
static LTO_SUPPORTED: AtomicBool = AtomicBool::new(false);
fn load_libgccjit_if_needed(libgccjit_target_lib_file: &Path) { fn load_libgccjit_if_needed(libgccjit_target_lib_file: &Path) {
if gccjit::is_loaded() { if gccjit::is_loaded() {
// Do not load a libgccjit second time. // Do not load a libgccjit second time.
@ -233,8 +234,6 @@ impl CodegenBackend for GccCodegenBackend {
#[cfg(feature = "master")] #[cfg(feature = "master")]
{ {
gccjit::set_lang_name(c"GNU Rust");
let target_cpu = target_cpu(sess); let target_cpu = target_cpu(sess);
// Get the second TargetInfo with the correct CPU features by setting the arch. // Get the second TargetInfo with the correct CPU features by setting the arch.
@ -250,6 +249,7 @@ impl CodegenBackend for GccCodegenBackend {
#[cfg(feature = "master")] #[cfg(feature = "master")]
{ {
let lto_supported = gccjit::is_lto_supported(); let lto_supported = gccjit::is_lto_supported();
LTO_SUPPORTED.store(lto_supported, Ordering::SeqCst);
self.lto_supported.store(lto_supported, Ordering::SeqCst); self.lto_supported.store(lto_supported, Ordering::SeqCst);
gccjit::set_global_personality_function_name(b"rust_eh_personality\0"); gccjit::set_global_personality_function_name(b"rust_eh_personality\0");
@ -279,10 +279,6 @@ impl CodegenBackend for GccCodegenBackend {
} }
} }
fn thin_lto_supported(&self) -> bool {
false
}
fn provide(&self, providers: &mut Providers) { fn provide(&self, providers: &mut Providers) {
providers.queries.global_backend_features = providers.queries.global_backend_features =
|tcx, ()| gcc_util::global_gcc_features(tcx.sess) |tcx, ()| gcc_util::global_gcc_features(tcx.sess)
@ -423,19 +419,11 @@ unsafe impl Send for SyncContext {}
// FIXME(antoyo): that shouldn't be Sync. Parallel compilation is currently disabled with "CodegenBackend::supports_parallel()". // FIXME(antoyo): that shouldn't be Sync. Parallel compilation is currently disabled with "CodegenBackend::supports_parallel()".
unsafe impl Sync for SyncContext {} unsafe impl Sync for SyncContext {}
pub struct ThinBuffer;
impl ThinBufferMethods for ThinBuffer {
fn data(&self) -> &[u8] {
&[]
}
}
impl WriteBackendMethods for GccCodegenBackend { impl WriteBackendMethods for GccCodegenBackend {
type Module = GccContext; type Module = GccContext;
type TargetMachine = (); type TargetMachine = ();
type ModuleBuffer = ModuleBuffer; type ModuleBuffer = ModuleBuffer;
type ThinData = (); type ThinData = ThinData;
type ThinBuffer = ThinBuffer; type ThinBuffer = ThinBuffer;
fn run_and_optimize_fat_lto( fn run_and_optimize_fat_lto(
@ -452,16 +440,16 @@ impl WriteBackendMethods for GccCodegenBackend {
} }
fn run_thin_lto( fn run_thin_lto(
_cgcx: &CodegenContext, cgcx: &CodegenContext,
_prof: &SelfProfilerRef, prof: &SelfProfilerRef,
_dcx: DiagCtxtHandle<'_>, dcx: DiagCtxtHandle<'_>,
// FIXME(bjorn3): Limit LTO exports to these symbols // FIXME(bjorn3): Limit LTO exports to these symbols
_exported_symbols_for_lto: &[String], _exported_symbols_for_lto: &[String],
_each_linked_rlib_for_lto: &[PathBuf], each_linked_rlib_for_lto: &[PathBuf],
_modules: Vec<(String, Self::ThinBuffer)>, modules: Vec<(String, Self::ThinBuffer)>,
_cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) { ) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) {
unreachable!() back::lto::run_thin(cgcx, prof, dcx, each_linked_rlib_for_lto, modules, cached_modules)
} }
fn print_pass_timings(&self) { fn print_pass_timings(&self) {
@ -483,13 +471,13 @@ impl WriteBackendMethods for GccCodegenBackend {
} }
fn optimize_thin( fn optimize_thin(
_cgcx: &CodegenContext, cgcx: &CodegenContext,
_prof: &SelfProfilerRef, _prof: &SelfProfilerRef,
_shared_emitter: &SharedEmitter, _shared_emitter: &SharedEmitter,
_tm_factory: TargetMachineFactoryFn<Self>, _tm_factory: TargetMachineFactoryFn<Self>,
_thin: ThinModule<Self>, thin: ThinModule<Self>,
) -> ModuleCodegen<Self::Module> { ) -> ModuleCodegen<Self::Module> {
unreachable!() back::lto::optimize_thin_module(thin, cgcx)
} }
fn codegen( fn codegen(
@ -502,8 +490,8 @@ impl WriteBackendMethods for GccCodegenBackend {
back::write::codegen(cgcx, prof, shared_emitter, module, config) back::write::codegen(cgcx, prof, shared_emitter, module, config)
} }
fn prepare_thin(_module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) { fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
unreachable!() back::lto::prepare_thin(module)
} }
fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) { fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {

View file

@ -13,7 +13,7 @@ use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, ty}; use rustc_middle::{bug, ty};
use crate::common::TypeReflection; use crate::common::TypeReflection;
use crate::context::{CodegenCx, new_array_type}; use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt; use crate::type_of::LayoutGccExt;
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
@ -311,7 +311,7 @@ impl<'gcc, 'tcx> BaseTypeCodegenMethods for CodegenCx<'gcc, 'tcx> {
len = 0; len = 0;
} }
new_array_type(self.context, None, ty, len) self.context.new_array_type(None, ty, len)
} }
} }

View file

@ -11,4 +11,4 @@ tests/run-make/foreign-exceptions/
tests/run-make/glibc-staticlib-args/ tests/run-make/glibc-staticlib-args/
tests/run-make/lto-smoke-c/ tests/run-make/lto-smoke-c/
tests/run-make/return-non-c-like-enum/ tests/run-make/return-non-c-like-enum/
tests/run-make/short-ice

View file

@ -1 +0,0 @@
tests/ui/simd/intrinsic/splat.rs

View file

@ -89,13 +89,11 @@ tests/ui/thir-print/offset_of.rs
tests/ui/iterators/rangefrom-overflow-debug.rs tests/ui/iterators/rangefrom-overflow-debug.rs
tests/ui/iterators/rangefrom-overflow-overflow-checks.rs tests/ui/iterators/rangefrom-overflow-overflow-checks.rs
tests/ui/iterators/iter-filter-count-debug-check.rs tests/ui/iterators/iter-filter-count-debug-check.rs
tests/ui/eii/linking/codegen_single_crate.rs tests/ui/eii/codegen_single_crate.rs
tests/ui/eii/linking/codegen_cross_crate.rs tests/ui/eii/codegen_cross_crate.rs
tests/ui/eii/default/local_crate.rs tests/ui/eii/default/local_crate.rs
tests/ui/eii/duplicate/multiple_impls.rs tests/ui/eii/multiple_impls.rs
tests/ui/eii/default/call_default.rs tests/ui/eii/default/call_default.rs
tests/ui/eii/linking/same-symbol.rs tests/ui/eii/same-symbol.rs
tests/ui/eii/privacy1.rs tests/ui/eii/privacy1.rs
tests/ui/eii/default/call_impl.rs tests/ui/eii/default/call_impl.rs
tests/ui/c-variadic/copy.rs
tests/ui/asm/x86_64/global_asm_escape.rs

View file

@ -1,38 +0,0 @@
// Compiler:
//
// Run-time:
// status: 0
// FIXME: Remove this test once rustc's `./tests/codegen/riscv-abi/call-llvm-intrinsics.rs`
// stops ignoring GCC backend.
#![feature(link_llvm_intrinsics)]
#![allow(internal_features)]
struct A;
impl Drop for A {
fn drop(&mut self) {
println!("A");
}
}
extern "C" {
#[link_name = "llvm.sqrt.f32"]
fn sqrt(x: f32) -> f32;
}
pub fn do_call() {
let _a = A;
unsafe {
// Ensure that we `call` LLVM intrinsics instead of trying to `invoke` them
// CHECK: store float 4.000000e+00, float* %{{.}}, align 4
// CHECK: call float @llvm.sqrt.f32(float %{{.}}
sqrt(4.0);
}
}
fn main() {
do_call();
}

View file

@ -1,102 +0,0 @@
// Compiler:
//
// Run-time:
// status: 0
// FIXME: Remove this test once <tests/run-make/simd-ffi/simd.rs> stops
// ignoring GCC backend.
#![allow(internal_features, non_camel_case_types)]
// we can compile to a variety of platforms, because we don't need
// cross-compiled standard libraries.
#![feature(no_core, auto_traits)]
#![no_core]
#![feature(repr_simd, simd_ffi, link_llvm_intrinsics, lang_items, rustc_attrs)]
#[derive(Copy)]
#[repr(simd)]
pub struct f32x4([f32; 4]);
extern "C" {
#[link_name = "llvm.sqrt.v4f32"]
fn vsqrt(x: f32x4) -> f32x4;
}
pub fn foo(x: f32x4) -> f32x4 {
unsafe { vsqrt(x) }
}
#[derive(Copy)]
#[repr(simd)]
pub struct i32x4([i32; 4]);
extern "C" {
// _mm_sll_epi32
#[cfg(all(any(target_arch = "x86", target_arch = "x86-64"), target_feature = "sse2"))]
#[link_name = "llvm.x86.sse2.psll.d"]
fn integer(a: i32x4, b: i32x4) -> i32x4;
// vmaxq_s32
#[cfg(target_arch = "arm")]
#[link_name = "llvm.arm.neon.vmaxs.v4i32"]
fn integer(a: i32x4, b: i32x4) -> i32x4;
// vmaxq_s32
#[cfg(target_arch = "aarch64")]
#[link_name = "llvm.aarch64.neon.maxs.v4i32"]
fn integer(a: i32x4, b: i32x4) -> i32x4;
// Use a generic LLVM intrinsic to do type checking on other platforms
#[cfg(not(any(
all(any(target_arch = "x86", target_arch = "x86-64"), target_feature = "sse2"),
target_arch = "arm",
target_arch = "aarch64"
)))]
#[link_name = "llvm.smax.v4i32"]
fn integer(a: i32x4, b: i32x4) -> i32x4;
}
pub fn bar(a: i32x4, b: i32x4) -> i32x4 {
unsafe { integer(a, b) }
}
#[lang = "pointee_sized"]
pub trait PointeeSized {}
#[lang = "meta_sized"]
pub trait MetaSized: PointeeSized {}
#[lang = "sized"]
pub trait Sized: MetaSized {}
#[lang = "copy"]
pub trait Copy {}
impl Copy for f32 {}
impl Copy for i32 {}
impl Copy for [f32; 4] {}
impl Copy for [i32; 4] {}
pub mod marker {
pub use Copy;
}
#[lang = "freeze"]
auto trait Freeze {}
#[macro_export]
#[rustc_builtin_macro]
macro_rules! Copy {
() => {};
}
#[macro_export]
#[rustc_builtin_macro]
macro_rules! derive {
() => {};
}
#[lang = "start"]
fn start<T>(_main: fn() -> T, _argc: isize, _argv: *const *const u8, _sigpipe: u8) -> isize {
0
}
fn main() {}

View file

@ -1,27 +0,0 @@
// Compiler:
//
// Run-time:
// status: 0
use std::arch::asm;
fn exit_syscall(status: i32) -> ! {
#[cfg(target_arch = "x86_64")]
unsafe {
asm!(
"syscall",
in("rax") 60,
in("rdi") status,
options(noreturn)
);
}
#[cfg(not(target_arch = "x86_64"))]
std::process::exit(status);
}
fn main() {
// Used to crash with rustc_codegen_gcc.
exit_syscall(0);
std::process::exit(1);
}

View file

@ -12,7 +12,7 @@ def run_command(command, cwd=None):
sys.exit(1) sys.exit(1)
def clone_repository(repo_name, path, repo_url, sub_paths): def clone_repository(repo_name, path, repo_url, branch="master", sub_paths=None):
if os.path.exists(path): if os.path.exists(path):
while True: while True:
choice = input("There is already a `{}` folder, do you want to update it? [y/N]".format(path)) choice = input("There is already a `{}` folder, do you want to update it? [y/N]".format(path))
@ -21,15 +21,18 @@ def clone_repository(repo_name, path, repo_url, sub_paths):
return return
elif choice.lower() == "y": elif choice.lower() == "y":
print("Updating repository...") print("Updating repository...")
run_command(["git", "pull", "origin", "main"], cwd=path) run_command(["git", "pull", "origin", branch], cwd=path)
return return
else: else:
print("Didn't understand answer...") print("Didn't understand answer...")
print("Cloning {} repository...".format(repo_name)) print("Cloning {} repository...".format(repo_name))
run_command(["git", "clone", repo_url, "--filter=tree:0", "--no-checkout", path]) if sub_paths is None:
run_command(["git", "sparse-checkout", "init"], cwd=path) run_command(["git", "clone", repo_url, "--depth", "1", path])
run_command(["git", "sparse-checkout", "set", *sub_paths], cwd=path) else:
run_command(["git", "checkout"], cwd=path) run_command(["git", "clone", repo_url, "--filter=tree:0", "--no-checkout", path])
run_command(["git", "sparse-checkout", "init"], cwd=path)
run_command(["git", "sparse-checkout", "set", *sub_paths], cwd=path)
run_command(["git", "checkout"], cwd=path)
def append_intrinsic(array, intrinsic_name, translation): def append_intrinsic(array, intrinsic_name, translation):
@ -42,36 +45,121 @@ def convert_to_string(content):
return content return content
def extract_intrinsics_from_llvm(llvm_path): def extract_intrinsics_from_llvm(llvm_path, intrinsics):
intrinsics = {} command = ["llvm-tblgen", "llvm/IR/Intrinsics.td"]
command = ["llvm-tblgen", "llvm/IR/Intrinsics.td", "--dump-json"]
cwd = os.path.join(llvm_path, "llvm/include") cwd = os.path.join(llvm_path, "llvm/include")
print("=> Running command `{}` from `{}`".format(command, cwd)) print("=> Running command `{}` from `{}`".format(command, cwd))
p = subprocess.Popen(command, cwd=cwd, stdout=subprocess.PIPE) p = subprocess.Popen(command, cwd=cwd, stdout=subprocess.PIPE)
output, err = p.communicate() output, err = p.communicate()
content = json.loads(convert_to_string(output)) lines = convert_to_string(output).splitlines()
for intrinsic in content: pos = 0
data = content[intrinsic] while pos < len(lines):
if not isinstance(data, dict): line = lines[pos]
if not line.startswith("def "):
pos += 1
continue continue
current_arch = data.get("TargetPrefix") intrinsic = line.split(" ")[1].strip()
builtin_name = data.get("ClangBuiltinName") content = line
if current_arch is None or current_arch == "" or builtin_name is None: while pos < len(lines):
continue line = lines[pos].split(" // ")[0].strip()
intrinsic = intrinsic.split("_") content += line
if len(intrinsic) < 2 or intrinsic[0] != "int": pos += 1
continue if line == "}":
intrinsic[0] = "llvm" break
intrinsic = ".".join(intrinsic) entries = re.findall('string ClangBuiltinName = "(\\w+)";', content)
if current_arch not in intrinsics: current_arch = re.findall('string TargetPrefix = "(\\w+)";', content)
intrinsics[current_arch] = [] if len(entries) == 1 and len(current_arch) == 1:
append_intrinsic(intrinsics[current_arch], intrinsic, builtin_name) current_arch = current_arch[0]
intrinsic = intrinsic.split("_")
return intrinsics if len(intrinsic) < 2 or intrinsic[0] != "int":
continue
intrinsic[0] = "llvm"
intrinsic = ".".join(intrinsic)
if current_arch not in intrinsics:
intrinsics[current_arch] = []
append_intrinsic(intrinsics[current_arch], intrinsic, entries[0])
def update_intrinsics(llvm_path): def append_translation(json_data, p, array):
intrinsics = extract_intrinsics_from_llvm(llvm_path) it = json_data["index"][p]
content = it["docs"].split('`')
if len(content) != 5:
return
append_intrinsic(array, content[1], content[3])
def extract_intrinsics_from_llvmint(llvmint, intrinsics):
archs = [
"AMDGPU",
"aarch64",
"arm",
"cuda",
"hexagon",
"mips",
"nvvm",
"ppc",
"ptx",
"x86",
"xcore",
]
json_file = os.path.join(llvmint, "target/doc/llvmint.json")
# We need to regenerate the documentation!
run_command(
["cargo", "rustdoc", "--", "-Zunstable-options", "--output-format", "json"],
cwd=llvmint,
)
with open(json_file, "r", encoding="utf8") as f:
json_data = json.loads(f.read())
for p in json_data["paths"]:
it = json_data["paths"][p]
if it["crate_id"] != 0:
# This is from an external crate.
continue
if it["kind"] != "function":
# We're only looking for functions.
continue
# if len(it["path"]) == 2:
# # This is a "general" intrinsic, not bound to a specific arch.
# append_translation(json_data, p, general)
# continue
if len(it["path"]) != 3 or it["path"][1] not in archs:
continue
arch = it["path"][1]
if arch not in intrinsics:
intrinsics[arch] = []
append_translation(json_data, p, intrinsics[arch])
def fill_intrinsics(intrinsics, from_intrinsics, all_intrinsics):
for arch in from_intrinsics:
if arch not in intrinsics:
intrinsics[arch] = []
for entry in from_intrinsics[arch]:
if entry[0] in all_intrinsics:
if all_intrinsics[entry[0]] == entry[1]:
# This is a "full" duplicate, both the LLVM instruction and the GCC
# translation are the same.
continue
intrinsics[arch].append((entry[0], entry[1], True))
else:
intrinsics[arch].append((entry[0], entry[1], False))
all_intrinsics[entry[0]] = entry[1]
def update_intrinsics(llvm_path, llvmint, llvmint2):
intrinsics_llvm = {}
intrinsics_llvmint = {}
all_intrinsics = {}
extract_intrinsics_from_llvm(llvm_path, intrinsics_llvm)
extract_intrinsics_from_llvmint(llvmint, intrinsics_llvmint)
extract_intrinsics_from_llvmint(llvmint2, intrinsics_llvmint)
intrinsics = {}
# We give priority to translations from LLVM over the ones from llvmint.
fill_intrinsics(intrinsics, intrinsics_llvm, all_intrinsics)
fill_intrinsics(intrinsics, intrinsics_llvmint, all_intrinsics)
archs = [arch for arch in intrinsics] archs = [arch for arch in intrinsics]
archs.sort() archs.sort()
@ -85,41 +173,33 @@ def update_intrinsics(llvm_path):
# Since all intrinsic names start with "llvm.", we skip that prefix. # Since all intrinsic names start with "llvm.", we skip that prefix.
print("Updating content of `{}`...".format(output_file)) print("Updating content of `{}`...".format(output_file))
with open(output_file, "w", encoding="utf8") as out: with open(output_file, "w", encoding="utf8") as out:
out.write("""// File generated by `rustc_codegen_gcc/tools/generate_intrinsics.py` out.write("// File generated by `rustc_codegen_gcc/tools/generate_intrinsics.py`\n")
// DO NOT EDIT IT! out.write("// DO NOT EDIT IT!\n")
/// Translate a given LLVM intrinsic name to an equivalent GCC one. out.write("/// Translate a given LLVM intrinsic name to an equivalent GCC one.\n")
fn map_arch_intrinsic(full_name:&str)-> &'static str { out.write("fn map_arch_intrinsic(full_name:&str)->&'static str{\n")
let Some(name) = full_name.strip_prefix("llvm.") else { unimplemented!("***** unsupported LLVM intrinsic {}", full_name) }; out.write('let Some(name) = full_name.strip_prefix("llvm.") else { unimplemented!("***** unsupported LLVM intrinsic {}", full_name) };\n')
let Some((arch, name)) = name.split_once('.') else { unimplemented!("***** unsupported LLVM intrinsic llvm.{}", name) }; out.write('let Some((arch, name)) = name.split_once(\'.\') else { unimplemented!("***** unsupported LLVM intrinsic {}", name) };\n')
let old_arch_res = old_archs(arch, name); out.write("match arch {\n")
if let ArchCheckResult::Ok(res) = old_arch_res {
return res;
}
match arch {""")
for arch in archs: for arch in archs:
if len(intrinsics[arch]) == 0: if len(intrinsics[arch]) == 0:
continue continue
attribute = "#[expect(non_snake_case)]" if arch[0].isupper() else "" attribute = "#[expect(non_snake_case)]" if arch[0].isupper() else ""
out.write("\"{}\" => {{ {} fn {}(name: &str,full_name:&str) -> &'static str {{ match name {{".format(arch, attribute, arch)) out.write("\"{}\" => {{ {} fn {}(name: &str,full_name:&str) -> &'static str {{ match name {{".format(arch, attribute, arch))
intrinsics[arch].sort(key=lambda x: (x[0], x[1])) intrinsics[arch].sort(key=lambda x: (x[0], x[2]))
out.write(' // {}\n'.format(arch)) out.write(' // {}\n'.format(arch))
for entry in intrinsics[arch]: for entry in intrinsics[arch]:
llvm_name = entry[0].removeprefix("llvm."); llvm_name = entry[0].removeprefix("llvm.");
llvm_name = llvm_name.removeprefix(arch); llvm_name = llvm_name.removeprefix(arch);
llvm_name = llvm_name.removeprefix("."); llvm_name = llvm_name.removeprefix(".");
if "_round_mask" in entry[1]: if entry[2] is True: # if it is a duplicate
out.write(' // [DUPLICATE]: "{}" => "{}",\n'.format(llvm_name, entry[1]))
elif "_round_mask" in entry[1]:
out.write(' // [INVALID CONVERSION]: "{}" => "{}",\n'.format(llvm_name, entry[1])) out.write(' // [INVALID CONVERSION]: "{}" => "{}",\n'.format(llvm_name, entry[1]))
else: else:
out.write(' "{}" => "{}",\n'.format(llvm_name, entry[1])) out.write(' "{}" => "{}",\n'.format(llvm_name, entry[1]))
out.write(' _ => unimplemented!("***** unsupported LLVM intrinsic {full_name}"),\n') out.write(' _ => unimplemented!("***** unsupported LLVM intrinsic {full_name}"),\n')
out.write("}} }} {}(name,full_name) }}\n,".format(arch)) out.write("}} }} {}(name,full_name) }}\n,".format(arch))
out.write(""" _ => { out.write(' _ => unimplemented!("***** unsupported LLVM architecture {arch}, intrinsic:{full_name}"),\n')
match old_arch_res {
ArchCheckResult::UnknownIntrinsic => unimplemented!("***** unsupported LLVM intrinsic {full_name}"),
ArchCheckResult::UnknownArch => unimplemented!("***** unsupported LLVM architecture {arch}, intrinsic: {full_name}"),
ArchCheckResult::Ok(_) => unreachable!(),
}
}""")
out.write("}\n}") out.write("}\n}")
subprocess.call(["rustfmt", output_file]) subprocess.call(["rustfmt", output_file])
print("Done!") print("Done!")
@ -130,21 +210,35 @@ def main():
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
"llvm-project", "llvm-project",
) )
llvmint_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"llvmint",
)
llvmint2_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"llvmint-2",
)
# First, we clone the LLVM repository if it's not already here. # First, we clone the LLVM repository if it's not already here.
clone_repository( clone_repository(
"llvm-project", "llvm-project",
llvm_path, llvm_path,
"https://github.com/llvm/llvm-project", "https://github.com/llvm/llvm-project",
["llvm/include/llvm/IR", "llvm/include/llvm/CodeGen/"], branch="main",
sub_paths=["llvm/include/llvm/IR", "llvm/include/llvm/CodeGen/"],
) )
update_intrinsics(llvm_path) clone_repository(
"llvmint",
llvmint_path,
"https://github.com/GuillaumeGomez/llvmint",
)
clone_repository(
"llvmint2",
llvmint2_path,
"https://github.com/antoyo/llvmint",
)
update_intrinsics(llvm_path, llvmint_path, llvmint2_path)
# llvm-tblgen can be built with:
#
# mkdir llvm-tblgen-build && cd llvm-tblgen-build
# cmake -G Ninja -DLLVM_ENABLE_PROJECTS="llvm" -DCMAKE_BUILD_TYPE=Release ../llvm
# ninja llvm-tblgen
if __name__ == "__main__": if __name__ == "__main__":
sys.exit(main()) sys.exit(main())

View file

@ -31,6 +31,7 @@ rustc_llvm = { path = "../rustc_llvm" }
rustc_macros = { path = "../rustc_macros" } rustc_macros = { path = "../rustc_macros" }
rustc_metadata = { path = "../rustc_metadata" } rustc_metadata = { path = "../rustc_metadata" }
rustc_middle = { path = "../rustc_middle" } rustc_middle = { path = "../rustc_middle" }
rustc_query_system = { path = "../rustc_query_system" }
rustc_sanitizers = { path = "../rustc_sanitizers" } rustc_sanitizers = { path = "../rustc_sanitizers" }
rustc_session = { path = "../rustc_session" } rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" } rustc_span = { path = "../rustc_span" }

View file

@ -387,27 +387,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let pair = self.insert_value(pair, high, 1); let pair = self.insert_value(pair, high, 1);
pair pair
} }
// FIXME move into the branch below when LLVM 22 is the lowest version we support.
sym::carryless_mul if crate::llvm_util::get_version() >= (22, 0, 0) => {
let ty = args[0].layout.ty;
if !ty.is_integral() {
tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
span,
name,
ty,
});
return Ok(());
}
let (size, _) = ty.int_size_and_signed(self.tcx);
let width = size.bits();
let llty = self.type_ix(width);
let lhs = args[0].immediate();
let rhs = args[1].immediate();
self.call_intrinsic("llvm.clmul", &[llty], &[lhs, rhs])
}
sym::ctlz sym::ctlz
| sym::ctlz_nonzero | sym::ctlz_nonzero
| sym::cttz | sym::cttz
@ -2805,7 +2784,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
| sym::simd_ctlz | sym::simd_ctlz
| sym::simd_ctpop | sym::simd_ctpop
| sym::simd_cttz | sym::simd_cttz
| sym::simd_carryless_mul
| sym::simd_funnel_shl | sym::simd_funnel_shl
| sym::simd_funnel_shr | sym::simd_funnel_shr
) { ) {
@ -2830,7 +2808,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
sym::simd_cttz => "llvm.cttz", sym::simd_cttz => "llvm.cttz",
sym::simd_funnel_shl => "llvm.fshl", sym::simd_funnel_shl => "llvm.fshl",
sym::simd_funnel_shr => "llvm.fshr", sym::simd_funnel_shr => "llvm.fshr",
sym::simd_carryless_mul => "llvm.clmul",
_ => unreachable!(), _ => unreachable!(),
}; };
let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits(); let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
@ -2856,17 +2833,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
&[vec_ty], &[vec_ty],
&[args[0].immediate(), args[1].immediate(), args[2].immediate()], &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
)), )),
sym::simd_carryless_mul => {
if crate::llvm_util::get_version() >= (22, 0, 0) {
Ok(bx.call_intrinsic(
llvm_intrinsic,
&[vec_ty],
&[args[0].immediate(), args[1].immediate()],
))
} else {
span_bug!(span, "`simd_carryless_mul` needs LLVM 22 or higher");
}
}
_ => unreachable!(), _ => unreachable!(),
}; };
} }

View file

@ -354,14 +354,7 @@ impl CodegenBackend for LlvmCodegenBackend {
} }
fn replaced_intrinsics(&self) -> Vec<Symbol> { fn replaced_intrinsics(&self) -> Vec<Symbol> {
let mut will_not_use_fallback = vec![sym::unchecked_funnel_shl, sym::unchecked_funnel_shr, sym::carrying_mul_add]
vec![sym::unchecked_funnel_shl, sym::unchecked_funnel_shr, sym::carrying_mul_add];
if llvm_util::get_version() >= (22, 0, 0) {
will_not_use_fallback.push(sym::carryless_mul);
}
will_not_use_fallback
} }
fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Box<dyn Any> { fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Box<dyn Any> {

View file

@ -710,6 +710,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandRef { val: operand.val, layout, move_annotation: None } OperandRef { val: operand.val, layout, move_annotation: None }
} }
mir::Rvalue::CopyForDeref(_) => bug!("`CopyForDeref` in codegen"), mir::Rvalue::CopyForDeref(_) => bug!("`CopyForDeref` in codegen"),
mir::Rvalue::ShallowInitBox(..) => bug!("`ShallowInitBox` in codegen"),
} }
} }

View file

@ -80,11 +80,6 @@ pub trait CodegenBackend {
vec![] vec![]
} }
/// Is ThinLTO supported by this backend?
fn thin_lto_supported(&self) -> bool {
true
}
/// Value printed by `--print=backend-has-zstd`. /// Value printed by `--print=backend-has-zstd`.
/// ///
/// Used by compiletest to determine whether tests involving zstd compression /// Used by compiletest to determine whether tests involving zstd compression

View file

@ -7,10 +7,9 @@ use std::ops::Deref;
use rustc_data_structures::assert_matches; use rustc_data_structures::assert_matches;
use rustc_errors::{Diag, ErrorGuaranteed}; use rustc_errors::{Diag, ErrorGuaranteed};
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def::DefKind; use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
use rustc_hir::{self as hir, LangItem, find_attr}; use rustc_hir::{self as hir, LangItem};
use rustc_index::bit_set::DenseBitSet; use rustc_index::bit_set::DenseBitSet;
use rustc_infer::infer::TyCtxtInferExt; use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::visit::Visitor;
@ -216,7 +215,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
return; return;
} }
if !find_attr!(tcx.get_all_attrs(def_id), AttributeKind::RustcDoNotConstCheck) { if !tcx.has_attr(def_id, sym::rustc_do_not_const_check) {
self.visit_body(body); self.visit_body(body);
} }
@ -646,6 +645,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
Rvalue::Cast(_, _, _) => {} Rvalue::Cast(_, _, _) => {}
Rvalue::ShallowInitBox(_, _) => {}
Rvalue::UnaryOp(op, operand) => { Rvalue::UnaryOp(op, operand) => {
let ty = operand.ty(self.body, self.tcx); let ty = operand.ty(self.body, self.tcx);
match op { match op {
@ -814,10 +815,6 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
}); });
} }
if self.tcx.fn_sig(callee).skip_binder().c_variadic() {
self.check_op(ops::FnCallCVariadic)
}
// At this point, we are calling a function, `callee`, whose `DefId` is known... // At this point, we are calling a function, `callee`, whose `DefId` is known...
// `begin_panic` and `panic_display` functions accept generic // `begin_panic` and `panic_display` functions accept generic
@ -848,6 +845,13 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
return; return;
} }
// This can be called on stable via the `vec!` macro.
if tcx.is_lang_item(callee, LangItem::ExchangeMalloc) {
self.check_op(ops::HeapAllocation);
// Allow this call, skip all the checks below.
return;
}
// Intrinsics are language primitives, not regular calls, so treat them separately. // Intrinsics are language primitives, not regular calls, so treat them separately.
if let Some(intrinsic) = tcx.intrinsic(callee) { if let Some(intrinsic) = tcx.intrinsic(callee) {
if !tcx.is_const_fn(callee) { if !tcx.is_const_fn(callee) {

View file

@ -75,27 +75,6 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
} }
} }
/// A c-variadic function call.
#[derive(Debug)]
pub(crate) struct FnCallCVariadic;
impl<'tcx> NonConstOp<'tcx> for FnCallCVariadic {
fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
Status::Unstable {
gate: sym::const_c_variadic,
gate_already_checked: false,
safe_to_expose_on_stable: false,
is_function_call: true,
}
}
fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
ccx.tcx.sess.create_feature_err(
errors::NonConstCVariadicCall { span, kind: ccx.const_kind() },
sym::const_c_variadic,
)
}
}
/// A call to a function that is in a trait, or has trait bounds that make it conditionally-const. /// A call to a function that is in a trait, or has trait bounds that make it conditionally-const.
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct ConditionallyConstCall<'tcx> { pub(crate) struct ConditionallyConstCall<'tcx> {
@ -561,6 +540,18 @@ impl<'tcx> NonConstOp<'tcx> for Coroutine {
} }
} }
#[derive(Debug)]
pub(crate) struct HeapAllocation;
impl<'tcx> NonConstOp<'tcx> for HeapAllocation {
fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
ccx.dcx().create_err(errors::UnallowedHeapAllocations {
span,
kind: ccx.const_kind(),
teach: ccx.tcx.sess.teach(E0010),
})
}
}
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct InlineAsm; pub(crate) struct InlineAsm;
impl<'tcx> NonConstOp<'tcx> for InlineAsm { impl<'tcx> NonConstOp<'tcx> for InlineAsm {

View file

@ -1,5 +1,3 @@
use rustc_hir::attrs::AttributeKind;
use rustc_hir::find_attr;
use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::{self, BasicBlock, Location}; use rustc_middle::mir::{self, BasicBlock, Location};
use rustc_middle::ty::TyCtxt; use rustc_middle::ty::TyCtxt;
@ -36,7 +34,7 @@ pub fn check_live_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
return; return;
} }
if find_attr!(tcx.get_all_attrs(body.source.def_id()), AttributeKind::RustcDoNotConstCheck) { if tcx.has_attr(body.source.def_id(), sym::rustc_do_not_const_check) {
return; return;
} }

View file

@ -237,7 +237,8 @@ where
Rvalue::Use(operand) Rvalue::Use(operand)
| Rvalue::Repeat(operand, _) | Rvalue::Repeat(operand, _)
| Rvalue::UnaryOp(_, operand) | Rvalue::UnaryOp(_, operand)
| Rvalue::Cast(_, operand, _) => in_operand::<Q, _>(cx, in_local, operand), | Rvalue::Cast(_, operand, _)
| Rvalue::ShallowInitBox(operand, _) => in_operand::<Q, _>(cx, in_local, operand),
Rvalue::BinaryOp(_, box (lhs, rhs)) => { Rvalue::BinaryOp(_, box (lhs, rhs)) => {
in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs) in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)

View file

@ -192,6 +192,7 @@ where
} }
mir::Rvalue::Cast(..) mir::Rvalue::Cast(..)
| mir::Rvalue::ShallowInitBox(..)
| mir::Rvalue::Use(..) | mir::Rvalue::Use(..)
| mir::Rvalue::CopyForDeref(..) | mir::Rvalue::CopyForDeref(..)
| mir::Rvalue::ThreadLocalRef(..) | mir::Rvalue::ThreadLocalRef(..)

View file

@ -6,11 +6,10 @@ use rustc_abi::{Align, Size};
use rustc_ast::Mutability; use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap, IndexEntry}; use rustc_data_structures::fx::{FxHashMap, FxIndexMap, IndexEntry};
use rustc_errors::msg; use rustc_errors::msg;
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::{self as hir, CRATE_HIR_ID, LangItem, find_attr}; use rustc_hir::{self as hir, CRATE_HIR_ID, LangItem};
use rustc_middle::mir::AssertMessage; use rustc_middle::mir::AssertMessage;
use rustc_middle::mir::interpret::ReportedErrorInfo; use rustc_middle::mir::interpret::{Pointer, ReportedErrorInfo};
use rustc_middle::query::TyCtxtAt; use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{HasTypingEnv, TyAndLayout, ValidityRequirement}; use rustc_middle::ty::layout::{HasTypingEnv, TyAndLayout, ValidityRequirement};
use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_middle::ty::{self, Ty, TyCtxt};
@ -23,7 +22,7 @@ use super::error::*;
use crate::errors::{LongRunning, LongRunningWarn}; use crate::errors::{LongRunning, LongRunningWarn};
use crate::interpret::{ use crate::interpret::{
self, AllocId, AllocInit, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame, self, AllocId, AllocInit, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame,
GlobalAlloc, ImmTy, InterpCx, InterpResult, OpTy, PlaceTy, Pointer, RangeSet, Scalar, GlobalAlloc, ImmTy, InterpCx, InterpResult, OpTy, PlaceTy, RangeSet, Scalar,
compile_time_machine, err_inval, interp_ok, throw_exhaust, throw_inval, throw_ub, compile_time_machine, err_inval, interp_ok, throw_exhaust, throw_inval, throw_ub,
throw_ub_custom, throw_unsup, throw_unsup_format, throw_ub_custom, throw_unsup, throw_unsup_format,
}; };
@ -236,7 +235,7 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
if self.tcx.is_lang_item(def_id, LangItem::PanicDisplay) if self.tcx.is_lang_item(def_id, LangItem::PanicDisplay)
|| self.tcx.is_lang_item(def_id, LangItem::BeginPanic) || self.tcx.is_lang_item(def_id, LangItem::BeginPanic)
{ {
let args = Self::copy_fn_args(args); let args = self.copy_fn_args(args);
// &str or &&str // &str or &&str
assert!(args.len() == 1); assert!(args.len() == 1);
@ -441,9 +440,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
// sensitive check here. But we can at least rule out functions that are not const at // sensitive check here. But we can at least rule out functions that are not const at
// all. That said, we have to allow calling functions inside a `const trait`. These // all. That said, we have to allow calling functions inside a `const trait`. These
// *are* const-checked! // *are* const-checked!
if !ecx.tcx.is_const_fn(def) if !ecx.tcx.is_const_fn(def) || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check) {
|| find_attr!(ecx.tcx.get_all_attrs(def), AttributeKind::RustcDoNotConstCheck)
{
// We certainly do *not* want to actually call the fn // We certainly do *not* want to actually call the fn
// though, so be sure we return here. // though, so be sure we return here.
throw_unsup_format!("calling non-const function `{}`", instance) throw_unsup_format!("calling non-const function `{}`", instance)

View file

@ -289,6 +289,31 @@ pub(crate) struct UnallowedOpInConstContext {
pub msg: String, pub msg: String,
} }
#[derive(Diagnostic)]
#[diag(r#"allocations are not allowed in {$kind ->
[const] constant
[static] static
[const_fn] constant function
*[other] {""}
}s"#, code = E0010)]
pub(crate) struct UnallowedHeapAllocations {
#[primary_span]
#[label(
r#"allocation not allowed in {$kind ->
[const] constant
[static] static
[const_fn] constant function
*[other] {""}
}s"#
)]
pub span: Span,
pub kind: ConstContext,
#[note(
"the runtime heap is not yet available at compile-time, so no runtime heap allocations can be created"
)]
pub teach: bool,
}
#[derive(Diagnostic)] #[derive(Diagnostic)]
#[diag(r#"inline assembly is not allowed in {$kind -> #[diag(r#"inline assembly is not allowed in {$kind ->
[const] constant [const] constant
@ -506,19 +531,6 @@ pub struct NonConstClosure {
pub non_or_conditionally: &'static str, pub non_or_conditionally: &'static str,
} }
#[derive(Diagnostic)]
#[diag(r#"calling const c-variadic functions is unstable in {$kind ->
[const] constant
[static] static
[const_fn] constant function
*[other] {""}
}s"#, code = E0015)]
pub struct NonConstCVariadicCall {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
}
#[derive(Subdiagnostic)] #[derive(Subdiagnostic)]
pub enum NonConstClosureNote { pub enum NonConstClosureNote {
#[note("function defined here, but it is not `const`")] #[note("function defined here, but it is not `const`")]
@ -745,13 +757,11 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
WriteToReadOnly(_) => msg!("writing to {$allocation} which is read-only"), WriteToReadOnly(_) => msg!("writing to {$allocation} which is read-only"),
DerefFunctionPointer(_) => msg!("accessing {$allocation} which contains a function"), DerefFunctionPointer(_) => msg!("accessing {$allocation} which contains a function"),
DerefVTablePointer(_) => msg!("accessing {$allocation} which contains a vtable"), DerefVTablePointer(_) => msg!("accessing {$allocation} which contains a vtable"),
DerefVaListPointer(_) => msg!("accessing {$allocation} which contains a variable argument list"),
DerefTypeIdPointer(_) => msg!("accessing {$allocation} which contains a `TypeId`"), DerefTypeIdPointer(_) => msg!("accessing {$allocation} which contains a `TypeId`"),
InvalidBool(_) => msg!("interpreting an invalid 8-bit value as a bool: 0x{$value}"), InvalidBool(_) => msg!("interpreting an invalid 8-bit value as a bool: 0x{$value}"),
InvalidChar(_) => msg!("interpreting an invalid 32-bit value as a char: 0x{$value}"), InvalidChar(_) => msg!("interpreting an invalid 32-bit value as a char: 0x{$value}"),
InvalidTag(_) => msg!("enum value has invalid tag: {$tag}"), InvalidTag(_) => msg!("enum value has invalid tag: {$tag}"),
InvalidFunctionPointer(_) => msg!("using {$pointer} as function pointer but it does not point to a function"), InvalidFunctionPointer(_) => msg!("using {$pointer} as function pointer but it does not point to a function"),
InvalidVaListPointer(_) => msg!("using {$pointer} as variable argument list pointer but it does not point to a variable argument list"),
InvalidVTablePointer(_) => msg!("using {$pointer} as vtable pointer but it does not point to a vtable"), InvalidVTablePointer(_) => msg!("using {$pointer} as vtable pointer but it does not point to a vtable"),
InvalidVTableTrait { .. } => msg!("using vtable for `{$vtable_dyn_type}` but `{$expected_dyn_type}` was expected"), InvalidVTableTrait { .. } => msg!("using vtable for `{$vtable_dyn_type}` but `{$expected_dyn_type}` was expected"),
InvalidStr(_) => msg!("this string is not valid UTF-8: {$err}"), InvalidStr(_) => msg!("this string is not valid UTF-8: {$err}"),
@ -766,9 +776,6 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
} }
AbiMismatchArgument { .. } => msg!("calling a function whose parameter #{$arg_idx} has type {$callee_ty} passing argument of type {$caller_ty}"), AbiMismatchArgument { .. } => msg!("calling a function whose parameter #{$arg_idx} has type {$callee_ty} passing argument of type {$caller_ty}"),
AbiMismatchReturn { .. } => msg!("calling a function with return type {$callee_ty} passing return place of type {$caller_ty}"), AbiMismatchReturn { .. } => msg!("calling a function with return type {$callee_ty} passing return place of type {$caller_ty}"),
VaArgOutOfBounds => "more C-variadic arguments read than were passed".into(),
CVariadicMismatch { ..} => "calling a function where the caller and callee disagree on whether the function is C-variadic".into(),
CVariadicFixedCountMismatch { .. } => msg!("calling a C-variadic function with {$caller} fixed arguments, but the function expects {$callee}"),
} }
} }
@ -793,7 +800,6 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
| InvalidMeta(InvalidMetaKind::TooBig) | InvalidMeta(InvalidMetaKind::TooBig)
| InvalidUninitBytes(None) | InvalidUninitBytes(None)
| DeadLocal | DeadLocal
| VaArgOutOfBounds
| UninhabitedEnumVariantWritten(_) | UninhabitedEnumVariantWritten(_)
| UninhabitedEnumVariantRead(_) => {} | UninhabitedEnumVariantRead(_) => {}
@ -814,10 +820,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
diag.arg("len", len); diag.arg("len", len);
diag.arg("index", index); diag.arg("index", index);
} }
UnterminatedCString(ptr) UnterminatedCString(ptr) | InvalidFunctionPointer(ptr) | InvalidVTablePointer(ptr) => {
| InvalidFunctionPointer(ptr)
| InvalidVaListPointer(ptr)
| InvalidVTablePointer(ptr) => {
diag.arg("pointer", ptr); diag.arg("pointer", ptr);
} }
InvalidVTableTrait { expected_dyn_type, vtable_dyn_type } => { InvalidVTableTrait { expected_dyn_type, vtable_dyn_type } => {
@ -871,7 +874,6 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
WriteToReadOnly(alloc) WriteToReadOnly(alloc)
| DerefFunctionPointer(alloc) | DerefFunctionPointer(alloc)
| DerefVTablePointer(alloc) | DerefVTablePointer(alloc)
| DerefVaListPointer(alloc)
| DerefTypeIdPointer(alloc) => { | DerefTypeIdPointer(alloc) => {
diag.arg("allocation", alloc); diag.arg("allocation", alloc);
} }
@ -908,14 +910,6 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
diag.arg("caller_ty", caller_ty); diag.arg("caller_ty", caller_ty);
diag.arg("callee_ty", callee_ty); diag.arg("callee_ty", callee_ty);
} }
CVariadicMismatch { caller_is_c_variadic, callee_is_c_variadic } => {
diag.arg("caller_is_c_variadic", caller_is_c_variadic);
diag.arg("callee_is_c_variadic", callee_is_c_variadic);
}
CVariadicFixedCountMismatch { caller, callee } => {
diag.arg("caller", caller);
diag.arg("callee", callee);
}
} }
} }
} }

View file

@ -7,20 +7,19 @@ use either::{Left, Right};
use rustc_abi::{self as abi, ExternAbi, FieldIdx, Integer, VariantIdx}; use rustc_abi::{self as abi, ExternAbi, FieldIdx, Integer, VariantIdx};
use rustc_data_structures::assert_matches; use rustc_data_structures::assert_matches;
use rustc_errors::msg; use rustc_errors::msg;
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
use rustc_hir::find_attr;
use rustc_middle::ty::layout::{IntegerExt, TyAndLayout}; use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
use rustc_middle::ty::{self, AdtDef, Instance, Ty, VariantDef}; use rustc_middle::ty::{self, AdtDef, Instance, Ty, VariantDef};
use rustc_middle::{bug, mir, span_bug}; use rustc_middle::{bug, mir, span_bug};
use rustc_span::sym;
use rustc_target::callconv::{ArgAbi, FnAbi, PassMode}; use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
use tracing::field::Empty; use tracing::field::Empty;
use tracing::{info, instrument, trace}; use tracing::{info, instrument, trace};
use super::{ use super::{
CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
Projectable, Provenance, ReturnAction, ReturnContinuation, Scalar, interp_ok, throw_ub, Projectable, Provenance, ReturnAction, ReturnContinuation, Scalar, StackPopInfo, interp_ok,
throw_ub_custom, throw_ub, throw_ub_custom, throw_unsup_format,
}; };
use crate::enter_trace_span; use crate::enter_trace_span;
use crate::interpret::EnteredTraceSpan; use crate::interpret::EnteredTraceSpan;
@ -43,22 +42,25 @@ impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
FnArg::InPlace(mplace) => &mplace.layout, FnArg::InPlace(mplace) => &mplace.layout,
} }
} }
}
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Make a copy of the given fn_arg. Any `InPlace` are degenerated to copies, no protection of the /// Make a copy of the given fn_arg. Any `InPlace` are degenerated to copies, no protection of the
/// original memory occurs. /// original memory occurs.
pub fn copy_fn_arg(&self) -> OpTy<'tcx, Prov> { pub fn copy_fn_arg(&self, arg: &FnArg<'tcx, M::Provenance>) -> OpTy<'tcx, M::Provenance> {
match self { match arg {
FnArg::Copy(op) => op.clone(), FnArg::Copy(op) => op.clone(),
FnArg::InPlace(mplace) => mplace.clone().into(), FnArg::InPlace(mplace) => mplace.clone().into(),
} }
} }
}
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Make a copy of the given fn_args. Any `InPlace` are degenerated to copies, no protection of the /// Make a copy of the given fn_args. Any `InPlace` are degenerated to copies, no protection of the
/// original memory occurs. /// original memory occurs.
pub fn copy_fn_args(args: &[FnArg<'tcx, M::Provenance>]) -> Vec<OpTy<'tcx, M::Provenance>> { pub fn copy_fn_args(
args.iter().map(|fn_arg| fn_arg.copy_fn_arg()).collect() &self,
args: &[FnArg<'tcx, M::Provenance>],
) -> Vec<OpTy<'tcx, M::Provenance>> {
args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
} }
/// Helper function for argument untupling. /// Helper function for argument untupling.
@ -143,10 +145,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Check if the inner type is one of the NPO-guaranteed ones. // Check if the inner type is one of the NPO-guaranteed ones.
// For that we first unpeel transparent *structs* (but not unions). // For that we first unpeel transparent *structs* (but not unions).
let is_npo = |def: AdtDef<'tcx>| { let is_npo = |def: AdtDef<'tcx>| {
find_attr!( self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
self.tcx.get_all_attrs(def.did()),
AttributeKind::RustcNonnullOptimizationGuaranteed
)
}; };
let inner = self.unfold_transparent(inner, /* may_unfold */ |def| { let inner = self.unfold_transparent(inner, /* may_unfold */ |def| {
// Stop at NPO types so that we don't miss that attribute in the check below! // Stop at NPO types so that we don't miss that attribute in the check below!
@ -316,7 +315,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// We work with a copy of the argument for now; if this is in-place argument passing, we // We work with a copy of the argument for now; if this is in-place argument passing, we
// will later protect the source it comes from. This means the callee cannot observe if we // will later protect the source it comes from. This means the callee cannot observe if we
// did in-place of by-copy argument passing, except for pointer equality tests. // did in-place of by-copy argument passing, except for pointer equality tests.
let caller_arg_copy = caller_arg.copy_fn_arg(); let caller_arg_copy = self.copy_fn_arg(caller_arg);
if !already_live { if !already_live {
let local = callee_arg.as_local().unwrap(); let local = callee_arg.as_local().unwrap();
let meta = caller_arg_copy.meta(); let meta = caller_arg_copy.meta();
@ -355,18 +354,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
let _trace = enter_trace_span!(M, step::init_stack_frame, %instance, tracing_separate_thread = Empty); let _trace = enter_trace_span!(M, step::init_stack_frame, %instance, tracing_separate_thread = Empty);
// The first order of business is to figure out the callee signature. // Compute callee information.
// However, that requires the list of variadic arguments. // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
// We use the *caller* information to determine where to split the list of arguments, let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
// and then later check that the callee indeed has the same number of fixed arguments.
let extra_tys = if caller_fn_abi.c_variadic { if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
let fixed_count = usize::try_from(caller_fn_abi.fixed_count).unwrap(); throw_unsup_format!("calling a c-variadic function is not supported");
let extra_tys = args[fixed_count..].iter().map(|arg| arg.layout().ty); }
self.tcx.mk_type_list_from_iter(extra_tys)
} else {
ty::List::empty()
};
let callee_fn_abi = self.fn_abi_of_instance(instance, extra_tys)?;
if caller_fn_abi.conv != callee_fn_abi.conv { if caller_fn_abi.conv != callee_fn_abi.conv {
throw_ub_custom!( throw_ub_custom!(
@ -378,19 +372,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) )
} }
if caller_fn_abi.c_variadic != callee_fn_abi.c_variadic {
throw_ub!(CVariadicMismatch {
caller_is_c_variadic: caller_fn_abi.c_variadic,
callee_is_c_variadic: callee_fn_abi.c_variadic,
});
}
if caller_fn_abi.c_variadic && caller_fn_abi.fixed_count != callee_fn_abi.fixed_count {
throw_ub!(CVariadicFixedCountMismatch {
caller: caller_fn_abi.fixed_count,
callee: callee_fn_abi.fixed_count,
});
}
// Check that all target features required by the callee (i.e., from // Check that all target features required by the callee (i.e., from
// the attribute `#[target_feature(enable = ...)]`) are enabled at // the attribute `#[target_feature(enable = ...)]`) are enabled at
// compile time. // compile time.
@ -463,10 +444,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// `pass_argument` would be the loop body. It takes care to // `pass_argument` would be the loop body. It takes care to
// not advance `caller_iter` for ignored arguments. // not advance `caller_iter` for ignored arguments.
let mut callee_args_abis = callee_fn_abi.args.iter().enumerate(); let mut callee_args_abis = callee_fn_abi.args.iter().enumerate();
// Determine whether there is a special VaList argument. This is always the
// last argument, and since arguments start at index 1 that's `arg_count`.
let va_list_arg =
callee_fn_abi.c_variadic.then(|| mir::Local::from_usize(body.arg_count));
for local in body.args_iter() { for local in body.args_iter() {
// Construct the destination place for this argument. At this point all // Construct the destination place for this argument. At this point all
// locals are still dead, so we cannot construct a `PlaceTy`. // locals are still dead, so we cannot construct a `PlaceTy`.
@ -475,31 +452,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// type, but the result gets cached so this avoids calling the instantiation // type, but the result gets cached so this avoids calling the instantiation
// query *again* the next time this local is accessed. // query *again* the next time this local is accessed.
let ty = self.layout_of_local(self.frame(), local, None)?.ty; let ty = self.layout_of_local(self.frame(), local, None)?.ty;
if Some(local) == va_list_arg { if Some(local) == body.spread_arg {
// This is the last callee-side argument of a variadic function.
// This argument is a VaList holding the remaining caller-side arguments.
self.storage_live(local)?;
let place = self.eval_place(dest)?;
let mplace = self.force_allocation(&place)?;
// Consume the remaining arguments by putting them into the variable argument
// list.
let varargs = self.allocate_varargs(&mut caller_args, &mut callee_args_abis)?;
// When the frame is dropped, these variable arguments are deallocated.
self.frame_mut().va_list = varargs.clone();
let key = self.va_list_ptr(varargs.into());
// Zero the VaList, so it is fully initialized.
self.write_bytes_ptr(
mplace.ptr(),
(0..mplace.layout.size.bytes()).map(|_| 0u8),
)?;
// Store the "key" pointer in the right field.
let key_mplace = self.va_list_key_field(&mplace)?;
self.write_pointer(key, &key_mplace)?;
} else if Some(local) == body.spread_arg {
// Make the local live once, then fill in the value field by field. // Make the local live once, then fill in the value field by field.
self.storage_live(local)?; self.storage_live(local)?;
// Must be a tuple // Must be a tuple
@ -538,7 +491,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if instance.def.requires_caller_location(*self.tcx) { if instance.def.requires_caller_location(*self.tcx) {
callee_args_abis.next().unwrap(); callee_args_abis.next().unwrap();
} }
// Now we should have no more caller args or callee arg ABIs. // Now we should have no more caller args or callee arg ABIs
assert!( assert!(
callee_args_abis.next().is_none(), callee_args_abis.next().is_none(),
"mismatch between callee ABI and callee body arguments" "mismatch between callee ABI and callee body arguments"
@ -613,7 +566,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if let Some(fallback) = M::call_intrinsic( if let Some(fallback) = M::call_intrinsic(
self, self,
instance, instance,
&Self::copy_fn_args(args), &self.copy_fn_args(args),
destination, destination,
target, target,
unwind, unwind,
@ -700,7 +653,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// An `InPlace` does nothing here, we keep the original receiver intact. We can't // An `InPlace` does nothing here, we keep the original receiver intact. We can't
// really pass the argument in-place anyway, and we are constructing a new // really pass the argument in-place anyway, and we are constructing a new
// `Immediate` receiver. // `Immediate` receiver.
let mut receiver = args[0].copy_fn_arg(); let mut receiver = self.copy_fn_arg(&args[0]);
let receiver_place = loop { let receiver_place = loop {
match receiver.layout.ty.kind() { match receiver.layout.ty.kind() {
ty::Ref(..) | ty::RawPtr(..) => { ty::Ref(..) | ty::RawPtr(..) => {
@ -821,50 +774,41 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
with_caller_location: bool, with_caller_location: bool,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
trace!("init_fn_tail_call: {:#?}", fn_val); trace!("init_fn_tail_call: {:#?}", fn_val);
// This is the "canonical" implementation of tails calls, // This is the "canonical" implementation of tails calls,
// a pop of the current stack frame, followed by a normal call // a pop of the current stack frame, followed by a normal call
// which pushes a new stack frame, with the return address from // which pushes a new stack frame, with the return address from
// the popped stack frame. // the popped stack frame.
// //
// Note that we cannot use `return_from_current_stack_frame`, // Note that we are using `pop_stack_frame_raw` and not `return_from_current_stack_frame`,
// as that "executes" the goto to the return block, but we don't want to, // as the latter "executes" the goto to the return block, but we don't want to,
// only the tail called function should return to the current return block. // only the tail called function should return to the current return block.
let StackPopInfo { return_action, return_cont, return_place } =
self.pop_stack_frame_raw(false, |_this, _return_place| {
// This function's return value is just discarded, the tail-callee will fill in the return place instead.
interp_ok(())
})?;
// The arguments need to all be copied since the current stack frame will be removed assert_eq!(return_action, ReturnAction::Normal);
// before the callee even starts executing.
// FIXME(explicit_tail_calls,#144855): does this match what codegen does? // Take the "stack pop cleanup" info, and use that to initiate the next call.
let args = args.iter().map(|fn_arg| FnArg::Copy(fn_arg.copy_fn_arg())).collect::<Vec<_>>(); let ReturnContinuation::Goto { ret, unwind } = return_cont else {
// Remove the frame from the stack. bug!("can't tailcall as root");
let frame = self.pop_stack_frame_raw()?;
// Remember where this frame would have returned to.
let ReturnContinuation::Goto { ret, unwind } = frame.return_cont() else {
bug!("can't tailcall as root of the stack");
}; };
// There's no return value to deal with! Instead, we forward the old return place
// to the new function.
// FIXME(explicit_tail_calls): // FIXME(explicit_tail_calls):
// we should check if both caller&callee can/n't unwind, // we should check if both caller&callee can/n't unwind,
// see <https://github.com/rust-lang/rust/pull/113128#issuecomment-1614979803> // see <https://github.com/rust-lang/rust/pull/113128#issuecomment-1614979803>
// Now push the new stack frame.
self.init_fn_call( self.init_fn_call(
fn_val, fn_val,
(caller_abi, caller_fn_abi), (caller_abi, caller_fn_abi),
&*args, args,
with_caller_location, with_caller_location,
frame.return_place(), &return_place,
ret, ret,
unwind, unwind,
)?; )
// Finally, clear the local variables. Has to be done after pushing to support
// non-scalar arguments.
// FIXME(explicit_tail_calls,#144855): revisit this once codegen supports indirect
// arguments, to ensure the semantics are compatible.
let return_action = self.cleanup_stack_frame(/* unwinding */ false, frame)?;
assert_eq!(return_action, ReturnAction::Normal);
interp_ok(())
} }
pub(super) fn init_drop_in_place_call( pub(super) fn init_drop_in_place_call(
@ -959,18 +903,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// local's value out. // local's value out.
let return_op = let return_op =
self.local_to_op(mir::RETURN_PLACE, None).expect("return place should always be live"); self.local_to_op(mir::RETURN_PLACE, None).expect("return place should always be live");
// Remove the frame from the stack. // Do the actual pop + copy.
let frame = self.pop_stack_frame_raw()?; let stack_pop_info = self.pop_stack_frame_raw(unwinding, |this, return_place| {
// Copy the return value and remember the return continuation. this.copy_op_allow_transmute(&return_op, return_place)?;
if !unwinding { trace!("return value: {:?}", this.dump_place(return_place));
self.copy_op_allow_transmute(&return_op, frame.return_place())?; interp_ok(())
trace!("return value: {:?}", self.dump_place(frame.return_place())); })?;
}
let return_cont = frame.return_cont(); match stack_pop_info.return_action {
// Finish popping the stack frame.
let return_action = self.cleanup_stack_frame(unwinding, frame)?;
// Jump to the next block.
match return_action {
ReturnAction::Normal => {} ReturnAction::Normal => {}
ReturnAction::NoJump => { ReturnAction::NoJump => {
// The hook already did everything. // The hook already did everything.
@ -988,7 +928,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Normal return, figure out where to jump. // Normal return, figure out where to jump.
if unwinding { if unwinding {
// Follow the unwind edge. // Follow the unwind edge.
match return_cont { match stack_pop_info.return_cont {
ReturnContinuation::Goto { unwind, .. } => { ReturnContinuation::Goto { unwind, .. } => {
// This must be the very last thing that happens, since it can in fact push a new stack frame. // This must be the very last thing that happens, since it can in fact push a new stack frame.
self.unwind_to_block(unwind) self.unwind_to_block(unwind)
@ -999,7 +939,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
} }
} else { } else {
// Follow the normal return edge. // Follow the normal return edge.
match return_cont { match stack_pop_info.return_cont {
ReturnContinuation::Goto { ret, .. } => self.return_to_block(ret), ReturnContinuation::Goto { ret, .. } => self.return_to_block(ret),
ReturnContinuation::Stop { .. } => { ReturnContinuation::Stop { .. } => {
assert!( assert!(

View file

@ -23,8 +23,8 @@ use super::memory::MemoryKind;
use super::util::ensure_monomorphic_enough; use super::util::ensure_monomorphic_enough;
use super::{ use super::{
AllocId, CheckInAllocMsg, ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Pointer, AllocId, CheckInAllocMsg, ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Pointer,
PointerArithmetic, Projectable, Provenance, Scalar, err_ub_custom, err_unsup_format, interp_ok, PointerArithmetic, Provenance, Scalar, err_ub_custom, err_unsup_format, interp_ok, throw_inval,
throw_inval, throw_ub, throw_ub_custom, throw_ub_format, throw_unsup_format, throw_ub_custom, throw_ub_format,
}; };
use crate::interpret::Writeable; use crate::interpret::Writeable;
@ -750,57 +750,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.float_muladd_intrinsic::<Quad>(args, dest, MulAddType::Nondeterministic)? self.float_muladd_intrinsic::<Quad>(args, dest, MulAddType::Nondeterministic)?
} }
sym::va_copy => {
let va_list = self.deref_pointer(&args[0])?;
let key_mplace = self.va_list_key_field(&va_list)?;
let key = self.read_pointer(&key_mplace)?;
let varargs = self.get_ptr_va_list(key)?;
let copy_key = self.va_list_ptr(varargs.clone());
let copy_key_mplace = self.va_list_key_field(dest)?;
self.write_pointer(copy_key, &copy_key_mplace)?;
}
sym::va_end => {
let va_list = self.deref_pointer(&args[0])?;
let key_mplace = self.va_list_key_field(&va_list)?;
let key = self.read_pointer(&key_mplace)?;
self.deallocate_va_list(key)?;
}
sym::va_arg => {
let va_list = self.deref_pointer(&args[0])?;
let key_mplace = self.va_list_key_field(&va_list)?;
let key = self.read_pointer(&key_mplace)?;
// Invalidate the old list and get its content. We'll recreate the
// new list (one element shorter) below.
let mut varargs = self.deallocate_va_list(key)?;
let Some(arg_mplace) = varargs.pop_front() else {
throw_ub!(VaArgOutOfBounds);
};
// NOTE: In C some type conversions are allowed (e.g. casting between signed and
// unsigned integers). For now we require c-variadic arguments to be read with the
// exact type they were passed as.
if arg_mplace.layout.ty != dest.layout.ty {
throw_unsup_format!(
"va_arg type mismatch: requested `{}`, but next argument is `{}`",
dest.layout.ty,
arg_mplace.layout.ty
);
}
// Copy the argument.
self.copy_op(&arg_mplace, dest)?;
// Update the VaList pointer.
let new_key = self.va_list_ptr(varargs);
self.write_pointer(new_key, &key_mplace)?;
}
// Unsupported intrinsic: skip the return_to_block below. // Unsupported intrinsic: skip the return_to_block below.
_ => return interp_ok(false), _ => return interp_ok(false),
} }
@ -1281,26 +1230,4 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
interp_ok(Some(ImmTy::from_scalar(val, cast_to))) interp_ok(Some(ImmTy::from_scalar(val, cast_to)))
} }
} }
/// Get the MPlace of the key from the place storing the VaList.
pub(super) fn va_list_key_field<P: Projectable<'tcx, M::Provenance>>(
&self,
va_list: &P,
) -> InterpResult<'tcx, P> {
// The struct wrapped by VaList.
let va_list_inner = self.project_field(va_list, FieldIdx::ZERO)?;
// Find the first pointer field in this struct. The exact index is target-specific.
let ty::Adt(adt, substs) = va_list_inner.layout().ty.kind() else {
bug!("invalid VaListImpl layout");
};
for (i, field) in adt.non_enum_variant().fields.iter().enumerate() {
if field.ty(*self.tcx, substs).is_raw_ptr() {
return self.project_field(&va_list_inner, FieldIdx::from_usize(i));
}
}
bug!("no VaListImpl field is a pointer");
}
} }

View file

@ -23,8 +23,8 @@ use tracing::{debug, instrument, trace};
use super::{ use super::{
AllocBytes, AllocId, AllocInit, AllocMap, AllocRange, Allocation, CheckAlignMsg, AllocBytes, AllocId, AllocInit, AllocMap, AllocRange, Allocation, CheckAlignMsg,
CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine, CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
MayLeak, Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub, Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub,
err_ub_custom, interp_ok, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format, err_ub_custom, interp_ok, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
}; };
use crate::const_eval::ConstEvalErrKind; use crate::const_eval::ConstEvalErrKind;
@ -67,8 +67,6 @@ pub enum AllocKind {
LiveData, LiveData,
/// A function allocation (that fn ptrs point to). /// A function allocation (that fn ptrs point to).
Function, Function,
/// A variable argument list allocation (used by c-variadic functions).
VaList,
/// A vtable allocation. /// A vtable allocation.
VTable, VTable,
/// A TypeId allocation. /// A TypeId allocation.
@ -128,9 +126,6 @@ pub struct Memory<'tcx, M: Machine<'tcx>> {
/// Map for "extra" function pointers. /// Map for "extra" function pointers.
extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>, extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>,
/// Map storing variable argument lists.
va_list_map: FxIndexMap<AllocId, VecDeque<MPlaceTy<'tcx, M::Provenance>>>,
/// To be able to compare pointers with null, and to check alignment for accesses /// To be able to compare pointers with null, and to check alignment for accesses
/// to ZSTs (where pointers may dangle), we keep track of the size even for allocations /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
/// that do not exist any more. /// that do not exist any more.
@ -166,7 +161,6 @@ impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
Memory { Memory {
alloc_map: M::MemoryMap::default(), alloc_map: M::MemoryMap::default(),
extra_fn_ptr_map: FxIndexMap::default(), extra_fn_ptr_map: FxIndexMap::default(),
va_list_map: FxIndexMap::default(),
dead_alloc_map: FxIndexMap::default(), dead_alloc_map: FxIndexMap::default(),
validation_in_progress: Cell::new(false), validation_in_progress: Cell::new(false),
} }
@ -205,11 +199,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
return M::extern_static_pointer(self, def_id); return M::extern_static_pointer(self, def_id);
} }
None => { None => {
let is_fn_ptr = self.memory.extra_fn_ptr_map.contains_key(&alloc_id);
let is_va_list = self.memory.va_list_map.contains_key(&alloc_id);
assert!( assert!(
is_fn_ptr || is_va_list, self.memory.extra_fn_ptr_map.contains_key(&alloc_id),
"{alloc_id:?} is neither global, va_list nor a function pointer" "{alloc_id:?} is neither global nor a function pointer"
); );
} }
_ => {} _ => {}
@ -237,19 +229,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.global_root_pointer(Pointer::from(id)).unwrap() self.global_root_pointer(Pointer::from(id)).unwrap()
} }
/// Insert a new variable argument list in the global map of variable argument lists.
pub fn va_list_ptr(
&mut self,
varargs: VecDeque<MPlaceTy<'tcx, M::Provenance>>,
) -> Pointer<M::Provenance> {
let id = self.tcx.reserve_alloc_id();
let old = self.memory.va_list_map.insert(id, varargs);
assert!(old.is_none());
// Variable argument lists are global allocations, so make sure we get the right root
// pointer. We know this is not an `extern static` so this cannot fail.
self.global_root_pointer(Pointer::from(id)).unwrap()
}
pub fn allocate_ptr( pub fn allocate_ptr(
&mut self, &mut self,
size: Size, size: Size,
@ -977,7 +956,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
pub fn is_alloc_live(&self, id: AllocId) -> bool { pub fn is_alloc_live(&self, id: AllocId) -> bool {
self.memory.alloc_map.contains_key_ref(&id) self.memory.alloc_map.contains_key_ref(&id)
|| self.memory.extra_fn_ptr_map.contains_key(&id) || self.memory.extra_fn_ptr_map.contains_key(&id)
|| self.memory.va_list_map.contains_key(&id)
// We check `tcx` last as that has to acquire a lock in `many-seeds` mode. // We check `tcx` last as that has to acquire a lock in `many-seeds` mode.
// This also matches the order in `get_alloc_info`. // This also matches the order in `get_alloc_info`.
|| self.tcx.try_get_global_alloc(id).is_some() || self.tcx.try_get_global_alloc(id).is_some()
@ -1017,11 +995,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
return AllocInfo::new(Size::ZERO, align, AllocKind::Function, Mutability::Not); return AllocInfo::new(Size::ZERO, align, AllocKind::Function, Mutability::Not);
} }
// # Variable argument lists
if self.memory.va_list_map.contains_key(&id) {
return AllocInfo::new(Size::ZERO, Align::ONE, AllocKind::VaList, Mutability::Not);
}
// # Global allocations // # Global allocations
if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) { if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) {
// NOTE: `static` alignment from attributes has already been applied to the allocation. // NOTE: `static` alignment from attributes has already been applied to the allocation.
@ -1096,43 +1069,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
.into() .into()
} }
pub fn get_ptr_va_list(
&self,
ptr: Pointer<Option<M::Provenance>>,
) -> InterpResult<'tcx, &VecDeque<MPlaceTy<'tcx, M::Provenance>>> {
trace!("get_ptr_va_list({:?})", ptr);
let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
if offset.bytes() != 0 {
throw_ub!(InvalidVaListPointer(Pointer::new(alloc_id, offset)))
}
let Some(va_list) = self.memory.va_list_map.get(&alloc_id) else {
throw_ub!(InvalidVaListPointer(Pointer::new(alloc_id, offset)))
};
interp_ok(va_list)
}
/// Removes this VaList from the global map of variable argument lists. This does not deallocate
/// the VaList elements, that happens when the Frame is popped.
pub fn deallocate_va_list(
&mut self,
ptr: Pointer<Option<M::Provenance>>,
) -> InterpResult<'tcx, VecDeque<MPlaceTy<'tcx, M::Provenance>>> {
trace!("deallocate_va_list({:?})", ptr);
let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
if offset.bytes() != 0 {
throw_ub!(InvalidVaListPointer(Pointer::new(alloc_id, offset)))
}
let Some(va_list) = self.memory.va_list_map.swap_remove(&alloc_id) else {
throw_ub!(InvalidVaListPointer(Pointer::new(alloc_id, offset)))
};
self.memory.dead_alloc_map.insert(alloc_id, (Size::ZERO, Align::ONE));
interp_ok(va_list)
}
/// Get the dynamic type of the given vtable pointer. /// Get the dynamic type of the given vtable pointer.
/// If `expected_trait` is `Some`, it must be a vtable for the given trait. /// If `expected_trait` is `Some`, it must be a vtable for the given trait.
pub fn get_ptr_vtable_ty( pub fn get_ptr_vtable_ty(

View file

@ -36,7 +36,7 @@ pub use self::operand::{ImmTy, Immediate, OpTy};
pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable}; pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
use self::place::{MemPlace, Place}; use self::place::{MemPlace, Place};
pub use self::projection::{OffsetMode, Projectable}; pub use self::projection::{OffsetMode, Projectable};
pub use self::stack::{Frame, FrameInfo, LocalState, ReturnContinuation}; pub use self::stack::{Frame, FrameInfo, LocalState, ReturnContinuation, StackPopInfo};
pub use self::util::EnteredTraceSpan; pub use self::util::EnteredTraceSpan;
pub(crate) use self::util::create_static_alloc; pub(crate) use self::util::create_static_alloc;
pub use self::validity::{CtfeValidationMode, RangeSet, RefTracking}; pub use self::validity::{CtfeValidationMode, RangeSet, RefTracking};

View file

@ -12,14 +12,13 @@ use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, mir}; use rustc_middle::{bug, mir};
use rustc_mir_dataflow::impls::always_storage_live_locals; use rustc_mir_dataflow::impls::always_storage_live_locals;
use rustc_span::Span; use rustc_span::Span;
use rustc_target::callconv::ArgAbi;
use tracing::field::Empty; use tracing::field::Empty;
use tracing::{info_span, instrument, trace}; use tracing::{info_span, instrument, trace};
use super::{ use super::{
AllocId, CtfeProvenance, FnArg, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, AllocId, CtfeProvenance, Immediate, InterpCx, InterpResult, Machine, MemPlace, MemPlaceMeta,
MemPlaceMeta, MemoryKind, Operand, PlaceTy, Pointer, Provenance, ReturnAction, Scalar, MemoryKind, Operand, PlaceTy, Pointer, Provenance, ReturnAction, Scalar, from_known_layout,
from_known_layout, interp_ok, throw_ub, throw_unsup, interp_ok, throw_ub, throw_unsup,
}; };
use crate::{enter_trace_span, errors}; use crate::{enter_trace_span, errors};
@ -81,7 +80,7 @@ pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
/// and its layout in the caller. This place is to be interpreted relative to the /// and its layout in the caller. This place is to be interpreted relative to the
/// *caller's* stack frame. We use a `PlaceTy` instead of an `MPlaceTy` since this /// *caller's* stack frame. We use a `PlaceTy` instead of an `MPlaceTy` since this
/// avoids having to move *all* return places into Miri's memory. /// avoids having to move *all* return places into Miri's memory.
return_place: PlaceTy<'tcx, Prov>, pub return_place: PlaceTy<'tcx, Prov>,
/// The list of locals for this stack frame, stored in order as /// The list of locals for this stack frame, stored in order as
/// `[return_ptr, arguments..., variables..., temporaries...]`. /// `[return_ptr, arguments..., variables..., temporaries...]`.
@ -92,10 +91,6 @@ pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
/// Do *not* access this directly; always go through the machine hook! /// Do *not* access this directly; always go through the machine hook!
pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>, pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
/// The complete variable argument list of this frame. Its elements must be dropped when the
/// frame is popped.
pub(super) va_list: Vec<MPlaceTy<'tcx, Prov>>,
/// The span of the `tracing` crate is stored here. /// The span of the `tracing` crate is stored here.
/// When the guard is dropped, the span is exited. This gives us /// When the guard is dropped, the span is exited. This gives us
/// a full stack trace on all tracing statements. /// a full stack trace on all tracing statements.
@ -127,6 +122,19 @@ pub enum ReturnContinuation {
Stop { cleanup: bool }, Stop { cleanup: bool },
} }
/// Return type of [`InterpCx::pop_stack_frame_raw`].
pub struct StackPopInfo<'tcx, Prov: Provenance> {
/// Additional information about the action to be performed when returning from the popped
/// stack frame.
pub return_action: ReturnAction,
/// [`return_cont`](Frame::return_cont) of the popped stack frame.
pub return_cont: ReturnContinuation,
/// [`return_place`](Frame::return_place) of the popped stack frame.
pub return_place: PlaceTy<'tcx, Prov>,
}
/// State of a local variable including a memoized layout /// State of a local variable including a memoized layout
#[derive(Clone)] #[derive(Clone)]
pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> { pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
@ -251,7 +259,6 @@ impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> {
return_cont: self.return_cont, return_cont: self.return_cont,
return_place: self.return_place, return_place: self.return_place,
locals: self.locals, locals: self.locals,
va_list: self.va_list,
loc: self.loc, loc: self.loc,
extra, extra,
tracing_span: self.tracing_span, tracing_span: self.tracing_span,
@ -279,14 +286,6 @@ impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> {
self.instance self.instance
} }
pub fn return_place(&self) -> &PlaceTy<'tcx, Prov> {
&self.return_place
}
pub fn return_cont(&self) -> ReturnContinuation {
self.return_cont
}
/// Return the `SourceInfo` of the current instruction. /// Return the `SourceInfo` of the current instruction.
pub fn current_source_info(&self) -> Option<&mir::SourceInfo> { pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
self.loc.left().map(|loc| self.body.source_info(loc)) self.loc.left().map(|loc| self.body.source_info(loc))
@ -378,7 +377,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
return_cont, return_cont,
return_place: return_place.clone(), return_place: return_place.clone(),
locals, locals,
va_list: vec![],
instance, instance,
tracing_span: SpanGuard::new(), tracing_span: SpanGuard::new(),
extra: (), extra: (),
@ -412,26 +410,35 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
interp_ok(()) interp_ok(())
} }
/// Low-level helper that pops a stack frame from the stack without any cleanup. /// Low-level helper that pops a stack frame from the stack and returns some information about
/// This invokes `before_stack_pop`. /// it.
/// After calling this function, you need to deal with the return value, and then ///
/// invoke `cleanup_stack_frame`. /// This also deallocates locals, if necessary.
/// `copy_ret_val` gets called after the frame has been taken from the stack but before the locals have been deallocated.
///
/// [`M::before_stack_pop`] and [`M::after_stack_pop`] are called by this function
/// automatically.
///
/// The high-level version of this is `return_from_current_stack_frame`.
///
/// [`M::before_stack_pop`]: Machine::before_stack_pop
/// [`M::after_stack_pop`]: Machine::after_stack_pop
pub(super) fn pop_stack_frame_raw( pub(super) fn pop_stack_frame_raw(
&mut self, &mut self,
) -> InterpResult<'tcx, Frame<'tcx, M::Provenance, M::FrameExtra>> { unwinding: bool,
copy_ret_val: impl FnOnce(&mut Self, &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx>,
) -> InterpResult<'tcx, StackPopInfo<'tcx, M::Provenance>> {
M::before_stack_pop(self)?; M::before_stack_pop(self)?;
let frame = let frame =
self.stack_mut().pop().expect("tried to pop a stack frame, but there were none"); self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
interp_ok(frame)
}
/// Deallocate local variables in the stack frame, and invoke `after_stack_pop`. // Copy return value (unless we are unwinding).
pub(super) fn cleanup_stack_frame( if !unwinding {
&mut self, copy_ret_val(self, &frame.return_place)?;
unwinding: bool, }
frame: Frame<'tcx, M::Provenance, M::FrameExtra>,
) -> InterpResult<'tcx, ReturnAction> {
let return_cont = frame.return_cont; let return_cont = frame.return_cont;
let return_place = frame.return_place.clone();
// Cleanup: deallocate locals. // Cleanup: deallocate locals.
// Usually we want to clean up (deallocate locals), but in a few rare cases we don't. // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
@ -441,22 +448,22 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
ReturnContinuation::Stop { cleanup, .. } => cleanup, ReturnContinuation::Stop { cleanup, .. } => cleanup,
}; };
if cleanup { let return_action = if cleanup {
// We need to take the locals out, since we need to mutate while iterating.
for local in &frame.locals { for local in &frame.locals {
self.deallocate_local(local.value)?; self.deallocate_local(local.value)?;
} }
// Deallocate any c-variadic arguments.
self.deallocate_varargs(&frame.va_list)?;
// Call the machine hook, which determines the next steps. // Call the machine hook, which determines the next steps.
let return_action = M::after_stack_pop(self, frame, unwinding)?; let return_action = M::after_stack_pop(self, frame, unwinding)?;
assert_ne!(return_action, ReturnAction::NoCleanup); assert_ne!(return_action, ReturnAction::NoCleanup);
interp_ok(return_action) return_action
} else { } else {
// We also skip the machine hook when there's no cleanup. This not a real "pop" anyway. // We also skip the machine hook when there's no cleanup. This not a real "pop" anyway.
interp_ok(ReturnAction::NoCleanup) ReturnAction::NoCleanup
} };
interp_ok(StackPopInfo { return_action, return_cont, return_place })
} }
/// In the current stack frame, mark all locals as live that are not arguments and don't have /// In the current stack frame, mark all locals as live that are not arguments and don't have
@ -619,58 +626,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
} }
} }
impl<'a, 'tcx: 'a, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Consume the arguments provided by the iterator and store them as a list
/// of variadic arguments. Return a list of the places that hold those arguments.
pub(crate) fn allocate_varargs<I, J>(
&mut self,
caller_args: &mut I,
callee_abis: &mut J,
) -> InterpResult<'tcx, Vec<MPlaceTy<'tcx, M::Provenance>>>
where
I: Iterator<Item = (&'a FnArg<'tcx, M::Provenance>, &'a ArgAbi<'tcx, Ty<'tcx>>)>,
J: Iterator<Item = (usize, &'a ArgAbi<'tcx, Ty<'tcx>>)>,
{
// Consume the remaining arguments and store them in fresh allocations.
let mut varargs = Vec::new();
for (fn_arg, caller_abi) in caller_args {
// The callee ABI is entirely computed based on which arguments the caller has
// provided so it should not be possible to get a mismatch here.
let (_idx, callee_abi) = callee_abis.next().unwrap();
assert!(self.check_argument_compat(caller_abi, callee_abi)?);
// FIXME: do we have to worry about in-place argument passing?
let op = fn_arg.copy_fn_arg();
let mplace = self.allocate(op.layout, MemoryKind::Stack)?;
self.copy_op(&op, &mplace)?;
varargs.push(mplace);
}
assert!(callee_abis.next().is_none());
interp_ok(varargs)
}
/// Deallocate the variadic arguments in the list (that must have been created with `allocate_varargs`).
fn deallocate_varargs(
&mut self,
varargs: &[MPlaceTy<'tcx, M::Provenance>],
) -> InterpResult<'tcx> {
for vararg in varargs {
let ptr = vararg.ptr();
trace!(
"deallocating vararg {:?}: {:?}",
vararg,
// Locals always have a `alloc_id` (they are never the result of a int2ptr).
self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
);
self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
}
interp_ok(())
}
}
impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> { impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
pub(super) fn print( pub(super) fn print(
&self, &self,

View file

@ -249,6 +249,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.write_immediate(*val, &dest)?; self.write_immediate(*val, &dest)?;
} }
ShallowInitBox(ref operand, _) => {
let src = self.eval_operand(operand, None)?;
let v = self.read_immediate(&src)?;
self.write_immediate(*v, &dest)?;
}
Cast(cast_kind, ref operand, cast_ty) => { Cast(cast_kind, ref operand, cast_ty) => {
let src = self.eval_operand(operand, None)?; let src = self.eval_operand(operand, None)?;
let cast_ty = let cast_ty =

View file

@ -647,8 +647,13 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
} }
} else { } else {
// This is not CTFE, so it's Miri with recursive checking. // This is not CTFE, so it's Miri with recursive checking.
// FIXME: should we also `UnsafeCell` behind shared references? Currently that is not // FIXME: we do *not* check behind boxes, since creating a new box first creates it uninitialized
// and then puts the value in there, so briefly we have a box with uninit contents.
// FIXME: should we also skip `UnsafeCell` behind shared references? Currently that is not
// needed since validation reads bypass Stacked Borrows and data race checks. // needed since validation reads bypass Stacked Borrows and data race checks.
if matches!(ptr_kind, PointerKind::Box) {
return interp_ok(());
}
} }
let path = &self.path; let path = &self.path;
ref_tracking.track(place, || { ref_tracking.track(place, || {

View file

@ -9,7 +9,7 @@ arrayvec = { version = "0.7", default-features = false }
bitflags = "2.4.1" bitflags = "2.4.1"
either = "1.0" either = "1.0"
elsa = "1.11.0" elsa = "1.11.0"
ena = "0.14.4" ena = "0.14.3"
indexmap = "2.12.1" indexmap = "2.12.1"
jobserver_crate = { version = "0.1.28", package = "jobserver" } jobserver_crate = { version = "0.1.28", package = "jobserver" }
measureme = "12.0.1" measureme = "12.0.1"

View file

@ -128,21 +128,34 @@ fn par_slice<I: DynSend>(
guard: &ParallelGuard, guard: &ParallelGuard,
for_each: impl Fn(&mut I) + DynSync + DynSend, for_each: impl Fn(&mut I) + DynSync + DynSend,
) { ) {
let for_each = FromDyn::from(for_each); struct State<'a, F> {
let mut items = for_each.derive(items); for_each: FromDyn<F>,
rustc_thread_pool::scope(|s| { guard: &'a ParallelGuard,
let proof = items.derive(()); group: usize,
let group_size = std::cmp::max(items.len() / 128, 1); }
for group in items.chunks_mut(group_size) {
let group = proof.derive(group); fn par_rec<I: DynSend, F: Fn(&mut I) + DynSync + DynSend>(
s.spawn(|_| { items: &mut [I],
let mut group = group; state: &State<'_, F>,
for i in group.iter_mut() { ) {
guard.run(|| for_each(i)); if items.len() <= state.group {
} for item in items {
}); state.guard.run(|| (state.for_each)(item));
}
} else {
let (left, right) = items.split_at_mut(items.len() / 2);
let mut left = state.for_each.derive(left);
let mut right = state.for_each.derive(right);
rustc_thread_pool::join(move || par_rec(*left, state), move || par_rec(*right, state));
} }
}); }
let state = State {
for_each: FromDyn::from(for_each),
guard,
group: std::cmp::max(items.len() / 128, 1),
};
par_rec(items, &state)
} }
pub fn par_for_each_in<I: DynSend, T: IntoIterator<Item = I>>( pub fn par_for_each_in<I: DynSend, T: IntoIterator<Item = I>>(

View file

@ -1,11 +1,9 @@
#### Note: this error code is no longer emitted by the compiler.
The value of statics and constants must be known at compile time, and they live The value of statics and constants must be known at compile time, and they live
for the entire lifetime of a program. Creating a boxed value allocates memory on for the entire lifetime of a program. Creating a boxed value allocates memory on
the heap at runtime, and therefore cannot be done at compile time. the heap at runtime, and therefore cannot be done at compile time.
Erroneous code example: Erroneous code example:
```ignore (no longer emitted) ```compile_fail,E0010
const CON : Vec<i32> = vec![1, 2, 3]; const CON : Vec<i32> = vec![1, 2, 3];
``` ```

View file

@ -1,7 +1,7 @@
The requested ABI is unsupported by the current target. The requested ABI is unsupported by the current target.
The Rust compiler maintains a list of unsupported ABIs for each target. The rust compiler maintains for each target a list of unsupported ABIs on
If an ABI is present in such a list, this usually means that the that target. If an ABI is present in such a list this usually means that the
target / ABI combination is currently unsupported by llvm. target / ABI combination is currently unsupported by llvm.
If necessary, you can circumvent this check using custom target specifications. If necessary, you can circumvent this check using custom target specifications.

View file

@ -20,8 +20,7 @@
// //
// Do *not* remove entries from this list. Instead, just add a note to the corresponding markdown // Do *not* remove entries from this list. Instead, just add a note to the corresponding markdown
// file saying that this error is not emitted by the compiler any more (see E0001.md for an // file saying that this error is not emitted by the compiler any more (see E0001.md for an
// example), and remove all code examples that do not build any more by marking them // example), and remove all code examples that do not build any more.
// with `ignore (no longer emitted)`.
#[macro_export] #[macro_export]
#[rustfmt::skip] #[rustfmt::skip]
macro_rules! error_codes { macro_rules! error_codes {

View file

@ -2,11 +2,12 @@ use std::any::Any;
use std::default::Default; use std::default::Default;
use std::iter; use std::iter;
use std::path::Component::Prefix; use std::path::Component::Prefix;
use std::path::PathBuf; use std::path::{Path, PathBuf};
use std::rc::Rc; use std::rc::Rc;
use std::sync::Arc; use std::sync::Arc;
use rustc_ast::attr::MarkedAttrs; use rustc_ast::attr::MarkedAttrs;
use rustc_ast::token::MetaVarKind;
use rustc_ast::tokenstream::TokenStream; use rustc_ast::tokenstream::TokenStream;
use rustc_ast::visit::{AssocCtxt, Visitor}; use rustc_ast::visit::{AssocCtxt, Visitor};
use rustc_ast::{self as ast, AttrVec, Attribute, HasAttrs, Item, NodeId, PatKind, Safety}; use rustc_ast::{self as ast, AttrVec, Attribute, HasAttrs, Item, NodeId, PatKind, Safety};
@ -21,14 +22,14 @@ use rustc_hir::limit::Limit;
use rustc_hir::{Stability, find_attr}; use rustc_hir::{Stability, find_attr};
use rustc_lint_defs::RegisteredTools; use rustc_lint_defs::RegisteredTools;
use rustc_parse::MACRO_ARGUMENTS; use rustc_parse::MACRO_ARGUMENTS;
use rustc_parse::parser::Parser; use rustc_parse::parser::{AllowConstBlockItems, ForceCollect, Parser};
use rustc_session::Session; use rustc_session::Session;
use rustc_session::parse::ParseSess; use rustc_session::parse::ParseSess;
use rustc_span::def_id::{CrateNum, DefId, LocalDefId}; use rustc_span::def_id::{CrateNum, DefId, LocalDefId};
use rustc_span::edition::Edition; use rustc_span::edition::Edition;
use rustc_span::hygiene::{AstPass, ExpnData, ExpnKind, LocalExpnId, MacroKind}; use rustc_span::hygiene::{AstPass, ExpnData, ExpnKind, LocalExpnId, MacroKind};
use rustc_span::source_map::SourceMap; use rustc_span::source_map::SourceMap;
use rustc_span::{DUMMY_SP, Ident, Span, Symbol, kw}; use rustc_span::{DUMMY_SP, FileName, Ident, Span, Symbol, kw, sym};
use smallvec::{SmallVec, smallvec}; use smallvec::{SmallVec, smallvec};
use thin_vec::ThinVec; use thin_vec::ThinVec;
@ -1420,3 +1421,80 @@ pub fn resolve_path(sess: &Session, path: impl Into<PathBuf>, span: Span) -> PRe
} }
} }
} }
/// If this item looks like a specific enums from `rental`, emit a fatal error.
/// See #73345 and #83125 for more details.
/// FIXME(#73933): Remove this eventually.
fn pretty_printing_compatibility_hack(item: &Item, psess: &ParseSess) {
if let ast::ItemKind::Enum(ident, _, enum_def) = &item.kind
&& ident.name == sym::ProceduralMasqueradeDummyType
&& let [variant] = &*enum_def.variants
&& variant.ident.name == sym::Input
&& let FileName::Real(real) = psess.source_map().span_to_filename(ident.span)
&& let Some(c) = real
.local_path()
.unwrap_or(Path::new(""))
.components()
.flat_map(|c| c.as_os_str().to_str())
.find(|c| c.starts_with("rental") || c.starts_with("allsorts-rental"))
{
let crate_matches = if c.starts_with("allsorts-rental") {
true
} else {
let mut version = c.trim_start_matches("rental-").split('.');
version.next() == Some("0")
&& version.next() == Some("5")
&& version.next().and_then(|c| c.parse::<u32>().ok()).is_some_and(|v| v < 6)
};
if crate_matches {
psess.dcx().emit_fatal(errors::ProcMacroBackCompat {
crate_name: "rental".to_string(),
fixed_version: "0.5.6".to_string(),
});
}
}
}
pub(crate) fn ann_pretty_printing_compatibility_hack(ann: &Annotatable, psess: &ParseSess) {
let item = match ann {
Annotatable::Item(item) => item,
Annotatable::Stmt(stmt) => match &stmt.kind {
ast::StmtKind::Item(item) => item,
_ => return,
},
_ => return,
};
pretty_printing_compatibility_hack(item, psess)
}
pub(crate) fn stream_pretty_printing_compatibility_hack(
kind: MetaVarKind,
stream: &TokenStream,
psess: &ParseSess,
) {
let item = match kind {
MetaVarKind::Item => {
let mut parser = Parser::new(psess, stream.clone(), None);
// No need to collect tokens for this simple check.
parser
.parse_item(ForceCollect::No, AllowConstBlockItems::No)
.expect("failed to reparse item")
.expect("an actual item")
}
MetaVarKind::Stmt => {
let mut parser = Parser::new(psess, stream.clone(), None);
// No need to collect tokens for this simple check.
let stmt = parser
.parse_stmt(ForceCollect::No)
.expect("failed to reparse")
.expect("an actual stmt");
match &stmt.kind {
ast::StmtKind::Item(item) => item.clone(),
_ => return,
}
}
_ => return,
};
pretty_printing_compatibility_hack(&item, psess)
}

View file

@ -729,7 +729,7 @@ impl<'a> ExtCtxt<'a> {
ty: Box<ast::Ty>, ty: Box<ast::Ty>,
rhs_kind: ast::ConstItemRhsKind, rhs_kind: ast::ConstItemRhsKind,
) -> Box<ast::Item> { ) -> Box<ast::Item> {
let defaultness = ast::Defaultness::Implicit; let defaultness = ast::Defaultness::Final;
self.item( self.item(
span, span,
AttrVec::new(), AttrVec::new(),

View file

@ -86,7 +86,8 @@ pub fn features(sess: &Session, krate_attrs: &[Attribute], crate_name: Symbol) -
if let Some(f) = REMOVED_LANG_FEATURES.iter().find(|f| name == f.feature.name) { if let Some(f) = REMOVED_LANG_FEATURES.iter().find(|f| name == f.feature.name) {
let pull_note = if let Some(pull) = f.pull { let pull_note = if let Some(pull) = f.pull {
format!( format!(
"; see <https://github.com/rust-lang/rust/pull/{pull}> for more information", "; see <https://github.com/rust-lang/rust/pull/{}> for more information",
pull
) )
} else { } else {
"".to_owned() "".to_owned()
@ -122,7 +123,7 @@ pub fn features(sess: &Session, krate_attrs: &[Attribute], crate_name: Symbol) -
// If the enabled feature is unstable, record it. // If the enabled feature is unstable, record it.
if UNSTABLE_LANG_FEATURES.iter().find(|f| name == f.name).is_some() { if UNSTABLE_LANG_FEATURES.iter().find(|f| name == f.name).is_some() {
// When the ICE comes from a standard library crate, there's a chance that the person // When the ICE comes a standard library crate, there's a chance that the person
// hitting the ICE may be using -Zbuild-std or similar with an untested target. // hitting the ICE may be using -Zbuild-std or similar with an untested target.
// The bug is probably in the standard library and not the compiler in that case, // The bug is probably in the standard library and not the compiler in that case,
// but that doesn't really matter - we want a bug report. // but that doesn't really matter - we want a bug report.

View file

@ -446,6 +446,18 @@ pub(crate) struct GlobDelegationTraitlessQpath {
pub span: Span, pub span: Span,
} }
// This used to be the `proc_macro_back_compat` lint (#83125). It was later
// turned into a hard error.
#[derive(Diagnostic)]
#[diag("using an old version of `{$crate_name}`")]
#[note(
"older versions of the `{$crate_name}` crate no longer compile; please update to `{$crate_name}` v{$fixed_version}, or switch to one of the `{$crate_name}` alternatives"
)]
pub(crate) struct ProcMacroBackCompat {
pub crate_name: String,
pub fixed_version: String,
}
pub(crate) use metavar_exprs::*; pub(crate) use metavar_exprs::*;
mod metavar_exprs { mod metavar_exprs {
use super::*; use super::*;

View file

@ -105,6 +105,11 @@ impl MultiItemModifier for DeriveProcMacro {
// (e.g. `fn foo() { #[derive(Debug)] struct Bar; }`) // (e.g. `fn foo() { #[derive(Debug)] struct Bar; }`)
let is_stmt = matches!(item, Annotatable::Stmt(..)); let is_stmt = matches!(item, Annotatable::Stmt(..));
// We used to have an alternative behaviour for crates that needed it.
// We had a lint for a long time, but now we just emit a hard error.
// Eventually we might remove the special case hard error check
// altogether. See #73345.
crate::base::ann_pretty_printing_compatibility_hack(&item, &ecx.sess.psess);
let input = item.to_tokens(); let input = item.to_tokens();
let invoc_id = ecx.current_expansion.id; let invoc_id = ecx.current_expansion.id;

View file

@ -103,8 +103,8 @@ impl ToInternal<token::LitKind> for LitKind {
} }
} }
impl FromInternal<TokenStream> for Vec<TokenTree<TokenStream, Span, Symbol>> { impl FromInternal<(TokenStream, &mut Rustc<'_, '_>)> for Vec<TokenTree<TokenStream, Span, Symbol>> {
fn from_internal(stream: TokenStream) -> Self { fn from_internal((stream, rustc): (TokenStream, &mut Rustc<'_, '_>)) -> Self {
use rustc_ast::token::*; use rustc_ast::token::*;
// Estimate the capacity as `stream.len()` rounded up to the next power // Estimate the capacity as `stream.len()` rounded up to the next power
@ -115,6 +115,22 @@ impl FromInternal<TokenStream> for Vec<TokenTree<TokenStream, Span, Symbol>> {
while let Some(tree) = iter.next() { while let Some(tree) = iter.next() {
let (Token { kind, span }, joint) = match tree.clone() { let (Token { kind, span }, joint) = match tree.clone() {
tokenstream::TokenTree::Delimited(span, _, mut delim, mut stream) => { tokenstream::TokenTree::Delimited(span, _, mut delim, mut stream) => {
// We used to have an alternative behaviour for crates that
// needed it: a hack used to pass AST fragments to
// attribute and derive macros as a single nonterminal
// token instead of a token stream. Such token needs to be
// "unwrapped" and not represented as a delimited group. We
// had a lint for a long time, but now we just emit a hard
// error. Eventually we might remove the special case hard
// error check altogether. See #73345.
if let Delimiter::Invisible(InvisibleOrigin::MetaVar(kind)) = delim {
crate::base::stream_pretty_printing_compatibility_hack(
kind,
&stream,
rustc.psess(),
);
}
// In `mk_delimited` we avoid nesting invisible delimited // In `mk_delimited` we avoid nesting invisible delimited
// of the same `MetaVarKind`. Here we do the same but // of the same `MetaVarKind`. Here we do the same but
// ignore the `MetaVarKind` because it is discarded when we // ignore the `MetaVarKind` because it is discarded when we
@ -671,7 +687,7 @@ impl server::Server for Rustc<'_, '_> {
&mut self, &mut self,
stream: Self::TokenStream, stream: Self::TokenStream,
) -> Vec<TokenTree<Self::TokenStream, Self::Span, Self::Symbol>> { ) -> Vec<TokenTree<Self::TokenStream, Self::Span, Self::Symbol>> {
FromInternal::from_internal(stream) FromInternal::from_internal((stream, self))
} }
fn span_debug(&mut self, span: Self::Span) -> String { fn span_debug(&mut self, span: Self::Span) -> String {

View file

@ -25,6 +25,9 @@ declare_features! (
// feature-group-start: for testing purposes // feature-group-start: for testing purposes
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
/// A temporary feature gate used to enable parser extensions needed
/// to bootstrap fix for #5723.
(accepted, issue_5723_bootstrap, "1.0.0", None),
/// These are used to test this portion of the compiler, /// These are used to test this portion of the compiler,
/// they don't actually mean anything. /// they don't actually mean anything.
(accepted, test_accepted_feature, "1.0.0", None), (accepted, test_accepted_feature, "1.0.0", None),

View file

@ -1208,7 +1208,7 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
rustc_intrinsic_const_stable_indirect, Normal, rustc_intrinsic_const_stable_indirect, Normal,
template!(Word), WarnFollowing, EncodeCrossCrate::No, "this is an internal implementation detail", template!(Word), WarnFollowing, EncodeCrossCrate::No, "this is an internal implementation detail",
), ),
rustc_attr!( gated!(
rustc_allow_const_fn_unstable, Normal, rustc_allow_const_fn_unstable, Normal,
template!(Word, List: &["feat1, feat2, ..."]), DuplicatesOk, EncodeCrossCrate::No, template!(Word, List: &["feat1, feat2, ..."]), DuplicatesOk, EncodeCrossCrate::No,
"rustc_allow_const_fn_unstable side-steps feature gating and stability checks" "rustc_allow_const_fn_unstable side-steps feature gating and stability checks"

View file

@ -172,9 +172,6 @@ declare_features! (
/// Allow anonymous constants from an inline `const` block in pattern position /// Allow anonymous constants from an inline `const` block in pattern position
(removed, inline_const_pat, "1.88.0", Some(76001), (removed, inline_const_pat, "1.88.0", Some(76001),
Some("removed due to implementation concerns as it requires significant refactorings"), 138492), Some("removed due to implementation concerns as it requires significant refactorings"), 138492),
/// A temporary feature gate used to enable parser extensions needed
/// to bootstrap fix for #5723.
(removed, issue_5723_bootstrap, "CURRENT_RUSTC_VERSION", None, None),
/// Lazily evaluate constants. This allows constants to depend on type parameters. /// Lazily evaluate constants. This allows constants to depend on type parameters.
(removed, lazy_normalization_consts, "1.56.0", Some(72219), Some("superseded by `generic_const_exprs`"), 88369), (removed, lazy_normalization_consts, "1.56.0", Some(72219), Some("superseded by `generic_const_exprs`"), 88369),
/// Changes `impl Trait` to capture all lifetimes in scope. /// Changes `impl Trait` to capture all lifetimes in scope.

View file

@ -64,6 +64,8 @@ pub struct EnabledLibFeature {
} }
impl Features { impl Features {
/// `since` should be set for stable features that are nevertheless enabled with a `#[feature]`
/// attribute, indicating since when they are stable.
pub fn set_enabled_lang_feature(&mut self, lang_feat: EnabledLangFeature) { pub fn set_enabled_lang_feature(&mut self, lang_feat: EnabledLangFeature) {
self.enabled_lang_features.push(lang_feat); self.enabled_lang_features.push(lang_feat);
self.enabled_features.insert(lang_feat.gate_name); self.enabled_features.insert(lang_feat.gate_name);
@ -287,6 +289,10 @@ declare_features! (
(internal, panic_runtime, "1.10.0", Some(32837)), (internal, panic_runtime, "1.10.0", Some(32837)),
/// Allows using pattern types. /// Allows using pattern types.
(internal, pattern_types, "1.79.0", Some(123646)), (internal, pattern_types, "1.79.0", Some(123646)),
/// Allows using `#[rustc_allow_const_fn_unstable]`.
/// This is an attribute on `const fn` for the same
/// purpose as `#[allow_internal_unstable]`.
(internal, rustc_allow_const_fn_unstable, "1.49.0", Some(69399)),
/// Allows using compiler's own crates. /// Allows using compiler's own crates.
(unstable, rustc_private, "1.0.0", Some(27812)), (unstable, rustc_private, "1.0.0", Some(27812)),
/// Allows using internal rustdoc features like `doc(keyword)`. /// Allows using internal rustdoc features like `doc(keyword)`.
@ -414,8 +420,6 @@ declare_features! (
(unstable, const_async_blocks, "1.53.0", Some(85368)), (unstable, const_async_blocks, "1.53.0", Some(85368)),
/// Allows `const { ... }` as a shorthand for `const _: () = const { ... };` for module items. /// Allows `const { ... }` as a shorthand for `const _: () = const { ... };` for module items.
(unstable, const_block_items, "CURRENT_RUSTC_VERSION", Some(149226)), (unstable, const_block_items, "CURRENT_RUSTC_VERSION", Some(149226)),
/// Allows defining and calling c-variadic functions in const contexts.
(unstable, const_c_variadic, "CURRENT_RUSTC_VERSION", Some(151787)),
/// Allows `const || {}` closures in const contexts. /// Allows `const || {}` closures in const contexts.
(incomplete, const_closures, "1.68.0", Some(106003)), (incomplete, const_closures, "1.68.0", Some(106003)),
/// Allows using `[const] Destruct` bounds and calling drop impls in const contexts. /// Allows using `[const] Destruct` bounds and calling drop impls in const contexts.
@ -488,8 +492,6 @@ declare_features! (
(unstable, ffi_const, "1.45.0", Some(58328)), (unstable, ffi_const, "1.45.0", Some(58328)),
/// Allows the use of `#[ffi_pure]` on foreign functions. /// Allows the use of `#[ffi_pure]` on foreign functions.
(unstable, ffi_pure, "1.45.0", Some(58329)), (unstable, ffi_pure, "1.45.0", Some(58329)),
/// Allows marking trait functions as `final` to prevent overriding impls
(unstable, final_associated_functions, "CURRENT_RUSTC_VERSION", Some(131179)),
/// Controlling the behavior of fmt::Debug /// Controlling the behavior of fmt::Debug
(unstable, fmt_debug, "1.82.0", Some(129709)), (unstable, fmt_debug, "1.82.0", Some(129709)),
/// Allows using `#[align(...)]` on function items /// Allows using `#[align(...)]` on function items
@ -775,9 +777,8 @@ impl Features {
} }
} }
/// Some features are not allowed to be used together at the same time. /// Some features are not allowed to be used together at the same time, if
/// /// the two are present, produce an error.
/// If the two are present, produce an error.
pub const INCOMPATIBLE_FEATURES: &[(Symbol, Symbol)] = &[ pub const INCOMPATIBLE_FEATURES: &[(Symbol, Symbol)] = &[
// Experimental match ergonomics rulesets are incompatible with each other, to simplify the // Experimental match ergonomics rulesets are incompatible with each other, to simplify the
// boolean logic required to tell which typing rules to use. // boolean logic required to tell which typing rules to use.

View file

@ -897,9 +897,6 @@ pub enum AttributeKind {
/// Represents `#[debugger_visualizer]`. /// Represents `#[debugger_visualizer]`.
DebuggerVisualizer(ThinVec<DebugVisualizer>), DebuggerVisualizer(ThinVec<DebugVisualizer>),
/// Represents `#![default_lib_allocator]`
DefaultLibAllocator,
/// Represents [`#[deprecated]`](https://doc.rust-lang.org/stable/reference/attributes/diagnostics.html#the-deprecated-attribute). /// Represents [`#[deprecated]`](https://doc.rust-lang.org/stable/reference/attributes/diagnostics.html#the-deprecated-attribute).
Deprecation { deprecation: Deprecation, span: Span }, Deprecation { deprecation: Deprecation, span: Span },
@ -1166,9 +1163,6 @@ pub enum AttributeKind {
/// Represents `#[rustc_diagnostic_item]` /// Represents `#[rustc_diagnostic_item]`
RustcDiagnosticItem(Symbol), RustcDiagnosticItem(Symbol),
/// Represents `#[rustc_do_not_const_check]`
RustcDoNotConstCheck,
/// Represents `#[rustc_dummy]`. /// Represents `#[rustc_dummy]`.
RustcDummy, RustcDummy,
@ -1271,9 +1265,6 @@ pub enum AttributeKind {
/// Represents `#[rustc_non_const_trait_method]`. /// Represents `#[rustc_non_const_trait_method]`.
RustcNonConstTraitMethod, RustcNonConstTraitMethod,
/// Represents `#[rustc_nonnull_optimization_guaranteed]`.
RustcNonnullOptimizationGuaranteed,
/// Represents `#[rustc_nounwind]` /// Represents `#[rustc_nounwind]`
RustcNounwind, RustcNounwind,

View file

@ -35,7 +35,6 @@ impl AttributeKind {
CrateType(_) => No, CrateType(_) => No,
CustomMir(_, _, _) => Yes, CustomMir(_, _, _) => Yes,
DebuggerVisualizer(..) => No, DebuggerVisualizer(..) => No,
DefaultLibAllocator => No,
Deprecation { .. } => Yes, Deprecation { .. } => Yes,
DoNotRecommend { .. } => Yes, DoNotRecommend { .. } => Yes,
Doc(_) => Yes, Doc(_) => Yes,
@ -116,7 +115,6 @@ impl AttributeKind {
RustcDenyExplicitImpl(..) => No, RustcDenyExplicitImpl(..) => No,
RustcDeprecatedSafe2024 { .. } => Yes, RustcDeprecatedSafe2024 { .. } => Yes,
RustcDiagnosticItem(..) => Yes, RustcDiagnosticItem(..) => Yes,
RustcDoNotConstCheck => Yes,
RustcDummy => No, RustcDummy => No,
RustcDumpDefParents => No, RustcDumpDefParents => No,
RustcDumpItemBounds => No, RustcDumpItemBounds => No,
@ -150,7 +148,6 @@ impl AttributeKind {
RustcNoImplicitBounds => No, RustcNoImplicitBounds => No,
RustcNoMirInline => Yes, RustcNoMirInline => Yes,
RustcNonConstTraitMethod => No, // should be reported via other queries like `constness` RustcNonConstTraitMethod => No, // should be reported via other queries like `constness`
RustcNonnullOptimizationGuaranteed => Yes,
RustcNounwind => No, RustcNounwind => No,
RustcObjcClass { .. } => No, RustcObjcClass { .. } => No,
RustcObjcSelector { .. } => No, RustcObjcSelector { .. } => No,

View file

@ -320,6 +320,7 @@ language_item_table! {
FormatArgument, sym::format_argument, format_argument, Target::Struct, GenericRequirement::None; FormatArgument, sym::format_argument, format_argument, Target::Struct, GenericRequirement::None;
FormatArguments, sym::format_arguments, format_arguments, Target::Struct, GenericRequirement::None; FormatArguments, sym::format_arguments, format_arguments, Target::Struct, GenericRequirement::None;
ExchangeMalloc, sym::exchange_malloc, exchange_malloc_fn, Target::Fn, GenericRequirement::None;
DropInPlace, sym::drop_in_place, drop_in_place_fn, Target::Fn, GenericRequirement::Minimum(1); DropInPlace, sym::drop_in_place, drop_in_place_fn, Target::Fn, GenericRequirement::Minimum(1);
AllocLayout, sym::alloc_layout, alloc_layout, Target::Struct, GenericRequirement::None; AllocLayout, sym::alloc_layout, alloc_layout, Target::Struct, GenericRequirement::None;

View file

@ -1175,35 +1175,13 @@ pub(super) fn check_specialization_validity<'tcx>(
if let Err(parent_impl) = result { if let Err(parent_impl) = result {
if !tcx.is_impl_trait_in_trait(impl_item) { if !tcx.is_impl_trait_in_trait(impl_item) {
let span = tcx.def_span(impl_item); report_forbidden_specialization(tcx, impl_item, parent_impl);
let ident = tcx.item_ident(impl_item);
let err = match tcx.span_of_impl(parent_impl) {
Ok(sp) => errors::ImplNotMarkedDefault::Ok { span, ident, ok_label: sp },
Err(cname) => errors::ImplNotMarkedDefault::Err { span, ident, cname },
};
tcx.dcx().emit_err(err);
} else { } else {
tcx.dcx().delayed_bug(format!("parent item: {parent_impl:?} not marked as default")); tcx.dcx().delayed_bug(format!("parent item: {parent_impl:?} not marked as default"));
} }
} }
} }
fn check_overriding_final_trait_item<'tcx>(
tcx: TyCtxt<'tcx>,
trait_item: ty::AssocItem,
impl_item: ty::AssocItem,
) {
if trait_item.defaultness(tcx).is_final() {
tcx.dcx().emit_err(errors::OverridingFinalTraitFunction {
impl_span: tcx.def_span(impl_item.def_id),
trait_span: tcx.def_span(trait_item.def_id),
ident: tcx.item_ident(impl_item.def_id),
});
}
}
fn check_impl_items_against_trait<'tcx>( fn check_impl_items_against_trait<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
impl_id: LocalDefId, impl_id: LocalDefId,
@ -1281,8 +1259,6 @@ fn check_impl_items_against_trait<'tcx>(
impl_id.to_def_id(), impl_id.to_def_id(),
impl_item, impl_item,
); );
check_overriding_final_trait_item(tcx, ty_trait_item, ty_impl_item);
} }
if let Ok(ancestors) = trait_def.ancestors(tcx, impl_id.to_def_id()) { if let Ok(ancestors) = trait_def.ancestors(tcx, impl_id.to_def_id()) {

View file

@ -77,11 +77,11 @@ fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) -> hi
| sym::autodiff | sym::autodiff
| sym::bitreverse | sym::bitreverse
| sym::black_box | sym::black_box
| sym::box_new
| sym::breakpoint | sym::breakpoint
| sym::bswap | sym::bswap
| sym::caller_location | sym::caller_location
| sym::carrying_mul_add | sym::carrying_mul_add
| sym::carryless_mul
| sym::ceilf16 | sym::ceilf16
| sym::ceilf32 | sym::ceilf32
| sym::ceilf64 | sym::ceilf64
@ -222,7 +222,6 @@ fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) -> hi
| sym::wrapping_add | sym::wrapping_add
| sym::wrapping_mul | sym::wrapping_mul
| sym::wrapping_sub | sym::wrapping_sub
| sym::write_box_via_move
// tidy-alphabetical-end // tidy-alphabetical-end
=> hir::Safety::Safe, => hir::Safety::Safe,
_ => hir::Safety::Unsafe, _ => hir::Safety::Unsafe,
@ -565,7 +564,6 @@ pub(crate) fn check_intrinsic_type(
(1, 0, vec![param(0), param(0)], param(0)) (1, 0, vec![param(0), param(0)], param(0))
} }
sym::saturating_add | sym::saturating_sub => (1, 0, vec![param(0), param(0)], param(0)), sym::saturating_add | sym::saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
sym::carryless_mul => (1, 0, vec![param(0), param(0)], param(0)),
sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => { sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
(1, 0, vec![param(0), param(0)], param(0)) (1, 0, vec![param(0), param(0)], param(0))
} }
@ -584,13 +582,6 @@ pub(crate) fn check_intrinsic_type(
sym::write_via_move => { sym::write_via_move => {
(1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit) (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
} }
sym::write_box_via_move => {
let t = param(0);
let maybe_uninit_t = Ty::new_maybe_uninit(tcx, t);
let box_mu_t = Ty::new_box(tcx, maybe_uninit_t);
(1, 0, vec![box_mu_t, param(0)], box_mu_t)
}
sym::typed_swap_nonoverlapping => { sym::typed_swap_nonoverlapping => {
(1, 0, vec![Ty::new_mut_ptr(tcx, param(0)); 2], tcx.types.unit) (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)); 2], tcx.types.unit)
@ -696,6 +687,8 @@ pub(crate) fn check_intrinsic_type(
sym::ub_checks | sym::overflow_checks => (0, 0, Vec::new(), tcx.types.bool), sym::ub_checks | sym::overflow_checks => (0, 0, Vec::new(), tcx.types.bool),
sym::box_new => (1, 0, vec![param(0)], Ty::new_box(tcx, param(0))),
// contract_check_requires::<C>(C) -> bool, where C: impl Fn() -> bool // contract_check_requires::<C>(C) -> bool, where C: impl Fn() -> bool
sym::contract_check_requires => (1, 0, vec![param(0)], tcx.types.unit), sym::contract_check_requires => (1, 0, vec![param(0)], tcx.types.unit),
sym::contract_check_ensures => { sym::contract_check_ensures => {
@ -718,8 +711,7 @@ pub(crate) fn check_intrinsic_type(
| sym::simd_fmin | sym::simd_fmin
| sym::simd_fmax | sym::simd_fmax
| sym::simd_saturating_add | sym::simd_saturating_add
| sym::simd_saturating_sub | sym::simd_saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
| sym::simd_carryless_mul => (1, 0, vec![param(0), param(0)], param(0)),
sym::simd_arith_offset => (2, 0, vec![param(0), param(1)], param(0)), sym::simd_arith_offset => (2, 0, vec![param(0), param(1)], param(0)),
sym::simd_neg sym::simd_neg
| sym::simd_bswap | sym::simd_bswap

View file

@ -197,6 +197,18 @@ pub(super) fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDef
} }
} }
fn report_forbidden_specialization(tcx: TyCtxt<'_>, impl_item: DefId, parent_impl: DefId) {
let span = tcx.def_span(impl_item);
let ident = tcx.item_ident(impl_item);
let err = match tcx.span_of_impl(parent_impl) {
Ok(sp) => errors::ImplNotMarkedDefault::Ok { span, ident, ok_label: sp },
Err(cname) => errors::ImplNotMarkedDefault::Err { span, ident, cname },
};
tcx.dcx().emit_err(err);
}
fn missing_items_err( fn missing_items_err(
tcx: TyCtxt<'_>, tcx: TyCtxt<'_>,
impl_def_id: LocalDefId, impl_def_id: LocalDefId,

Some files were not shown because too many files have changed in this diff Show more