From 18b12218a4ae486b3c699efa159ab07277624982 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Wed, 28 May 2025 18:37:58 +0200
Subject: [PATCH 1/2] intrinsics: use const generic to set atomic ordering

---
 src/intrinsics/mod.rs | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/src/intrinsics/mod.rs b/src/intrinsics/mod.rs
index 27a5df8b1520..a0f96d85dc3c 100644
--- a/src/intrinsics/mod.rs
+++ b/src/intrinsics/mod.rs
@@ -875,7 +875,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let ptr = ptr.load_scalar(fx);

             let ty = generic_args.type_at(0);
-            let _ord = generic_args.const_at(1).to_value(); // FIXME: forward this to cranelift once they support that
             match ty.kind() {
                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                     // FIXME implement 128bit atomics
@@ -906,7 +905,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let val = CValue::by_val(val, fx.layout_of(ty));
             ret.write_cvalue(fx, val);
         }
-        _ if intrinsic.as_str().starts_with("atomic_store") => {
+        sym::atomic_store => {
             intrinsic_args!(fx, args => (ptr, val); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -939,7 +938,7 @@ fn codegen_regular_intrinsic_call<'tcx>(

             fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
         }
-        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
+        sym::atomic_xchg => {
             intrinsic_args!(fx, args => (ptr, new); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -960,8 +959,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
-            // both atomic_cxchg_* and atomic_cxchgweak_*
+        sym::atomic_cxchg | sym::atomic_cxchgweak => {
             intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -984,7 +982,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, ret_val)
         }
-        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
+        sym::atomic_xadd => {
             intrinsic_args!(fx, args => (ptr, amount); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -1006,7 +1004,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
+        sym::atomic_xsub => {
             intrinsic_args!(fx, args => (ptr, amount); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -1028,7 +1026,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_and") => {
+        sym::atomic_and => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -1049,7 +1047,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_or") => {
+        sym::atomic_or => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -1070,7 +1068,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_xor") => {
+        sym::atomic_xor => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -1091,7 +1089,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_nand") => {
+        sym::atomic_nand => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -1112,7 +1110,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_max") => {
+        sym::atomic_max => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -1133,7 +1131,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_umax") => {
+        sym::atomic_umax => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -1154,7 +1152,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_min") => {
+        sym::atomic_min => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);

             let ptr = ptr.load_scalar(fx);
@@ -1175,7 +1173,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old = CValue::by_val(old, layout);
             ret.write_cvalue(fx, old);
         }
-        _ if intrinsic.as_str().starts_with("atomic_umin") => {
+        sym::atomic_umin => {
             intrinsic_args!(fx, args => (ptr, src); intrinsic);

             let ptr = ptr.load_scalar(fx);
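
Note on the first patch: the memory ordering used to be baked into the
intrinsic names (`atomic_store_seqcst`, `atomic_xchg_relaxed`, ...),
which is why the old arms matched with `intrinsic.as_str().starts_with(...)`.
With the ordering passed as a const generic argument there is a single
symbol per operation, so the arms can match `sym::atomic_*` directly, and
the ordering is available through `generic_args.const_at(1)` (not yet
forwarded to Cranelift, per the FIXME removed in the first hunk). The
stand-alone sketch below is not rustc code; the constants and the function
are invented purely to illustrate the const-generic dispatch pattern on
stable Rust:

    use std::sync::atomic::{AtomicU32, Ordering};

    // Stand-ins for the ordering discriminants; the real intrinsics use a
    // dedicated enum as the const parameter, a u8 keeps this on stable Rust.
    const RELAXED: u8 = 0;
    const ACQUIRE: u8 = 1;
    const SEQCST: u8 = 2;

    // One generic entry point per operation: the ordering travels as a
    // const generic argument, so a consumer can match on the single name
    // `atomic_load` and inspect the const argument instead of parsing a
    // `_seqcst`/`_relaxed` name suffix.
    fn atomic_load<const ORD: u8>(x: &AtomicU32) -> u32 {
        let ord = match ORD {
            RELAXED => Ordering::Relaxed,
            ACQUIRE => Ordering::Acquire,
            _ => Ordering::SeqCst,
        };
        x.load(ord)
    }

    fn main() {
        let x = AtomicU32::new(42);
        // The ordering is chosen at the type level, much as the real
        // intrinsic's const ordering parameter is after this change.
        assert_eq!(atomic_load::<SEQCST>(&x), 42);
        assert_eq!(atomic_load::<RELAXED>(&x), 42);
    }
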
From d5e9833af388c9f3122877fe355ff7b244c50516 Mon Sep 17 00:00:00 2001
From: Jubilee Young
Date: Tue, 11 Feb 2025 22:09:16 -0800
Subject: [PATCH 2/2] Remove rustc's notion of "preferred" alignment AKA `__alignof`

In PR 90877 T-lang decided not to remove `intrinsics::pref_align_of`.
However, the intrinsic and its supporting code

1. is a nightly feature, so can be removed at compiler/libs discretion
2. requires considerable effort in the compiler to support, as it
   necessarily complicates every single site reasoning about alignment
3. has been justified based on relevance to codegen, but it is only a
   requirement for C++ (not C, not Rust) stack frame layout for AIX, in
   ways Rust would not consider even with increased C++ interop
4. is only used by rustc to overalign some globals, not for correctness
5. can be adequately replaced by other rules for globals, as it mostly
   affects alignments for a few types under 16 bytes of alignment
6. has only one clear beneficiary: automating C -> Rust translation for
   GNU extensions like `__alignof`
7. such code was likely intended to be `alignof` or `_Alignof`, because
   the GNU extension is a "false friend" of the C keyword, which makes
   the choice to support such a mapping very questionable
8. makes it easy to do incorrect codegen in the compiler by its mere
   presence, as the usual Rust rules of alignment (e.g.
   `size == align * N`) do not hold with preferred alignment

The implementation is clearly damaging the code quality of the compiler.
Thus it is within the compiler team's purview to simply rip it out. If
T-lang wishes to have this intrinsic restored for c2rust's benefit, it
would have to use a radically different implementation that somehow does
not cause internal incorrectness. Until then, remove the intrinsic and
its supporting code, as one tool and an ill-considered GCC extension
cannot justify risking correctness.

Because we touch a fair amount of the compiler to change this at all,
and unfortunately the duplication of AbiAndPrefAlign is deep-rooted, we
keep an "AbiAlign" type which we can wean code off later.
---
 src/intrinsics/mod.rs | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/src/intrinsics/mod.rs b/src/intrinsics/mod.rs
index a0f96d85dc3c..1d1cf884e48b 100644
--- a/src/intrinsics/mod.rs
+++ b/src/intrinsics/mod.rs
@@ -812,11 +812,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             dest.write_cvalue(fx, val);
         }
-        sym::pref_align_of
-        | sym::needs_drop
-        | sym::type_id
-        | sym::type_name
-        | sym::variant_count => {
+        sym::needs_drop | sym::type_id | sym::type_name | sym::variant_count => {
             intrinsic_args!(fx, args => (); intrinsic);

             let const_val = fx
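
Note on the second patch: the hunk above drops the backend's arm for the
`pref_align_of` intrinsic; what remains is ABI alignment, the value
`align_of` reports and the one Rust's layout invariants are stated for.
A stand-alone illustration (plain stable Rust, no intrinsics; the struct
is just an example) of the invariant cited in point 8, which holds for
ABI alignment but was not guaranteed for preferred alignment:

    use std::mem::{align_of, size_of};

    // Rust guarantees a value's size is a multiple of its ABI alignment,
    // so `[T; N]` needs no extra padding between elements. Preferred
    // alignment carried no such guarantee, which is what made it easy to
    // mis-reason about.
    fn size_is_multiple_of_align<T>() -> bool {
        size_of::<T>() % align_of::<T>() == 0
    }

    #[allow(dead_code)]
    #[repr(C)]
    struct Mixed {
        a: u64,
        b: u32,
        // tail padding keeps the size a multiple of the alignment
    }

    fn main() {
        assert!(size_is_multiple_of_align::<u8>());
        assert!(size_is_multiple_of_align::<u64>());
        assert!(size_is_multiple_of_align::<Mixed>());
    }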