Merge pull request #740 from rust-lang/sync_from_rust_2025_07_21

Sync from rust 2025/07/21
This commit is contained in:
antoyo 2025-08-03 16:53:19 -04:00 committed by GitHub
commit 36a516d810
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
22 changed files with 278 additions and 90 deletions

View file

@ -143,6 +143,15 @@ dependencies = [
"libc",
]
[[package]]
name = "object"
version = "0.37.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03fd943161069e1768b4b3d050890ba48730e590f57e56d4aa04e7e090e61b4a"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.20.2"
@ -179,6 +188,7 @@ dependencies = [
"boml",
"gccjit",
"lang_tester",
"object",
"tempfile",
]

View file

@ -22,6 +22,8 @@ master = ["gccjit/master"]
default = ["master"]
[dependencies]
object = { version = "0.37.0", default-features = false, features = ["std", "read"] }
tempfile = "3.20"
gccjit = "2.7"
#gccjit = { git = "https://github.com/rust-lang/gccjit.rs" }
@ -31,7 +33,6 @@ gccjit = "2.7"
[dev-dependencies]
boml = "0.3.1"
lang_tester = "0.8.0"
tempfile = "3.20"
[profile.dev]
# By compiling dependencies with optimizations, performing tests gets much faster.

View file

@ -31,7 +31,7 @@ pub fn run() -> Result<(), String> {
Some("clones/abi-cafe".as_ref()),
true,
)
.map_err(|err| (format!("Git clone failed with message: {err:?}!")))?;
.map_err(|err| format!("Git clone failed with message: {err:?}!"))?;
// Configure abi-cafe to use the exact same rustc version we use - this is crucial.
// Otherwise, the concept of ABI compatibility becomes meaningless.
std::fs::copy("rust-toolchain", "clones/abi-cafe/rust-toolchain")

View file

@ -43,18 +43,18 @@ pub fn run() -> Result<(), String> {
"--start" => {
start =
str::parse(&args.next().ok_or_else(|| "Fuzz start not provided!".to_string())?)
.map_err(|err| (format!("Fuzz start not a number {err:?}!")))?;
.map_err(|err| format!("Fuzz start not a number {err:?}!"))?;
}
"--count" => {
count =
str::parse(&args.next().ok_or_else(|| "Fuzz count not provided!".to_string())?)
.map_err(|err| (format!("Fuzz count not a number {err:?}!")))?;
.map_err(|err| format!("Fuzz count not a number {err:?}!"))?;
}
"-j" | "--jobs" => {
threads = str::parse(
&args.next().ok_or_else(|| "Fuzz thread count not provided!".to_string())?,
)
.map_err(|err| (format!("Fuzz thread count not a number {err:?}!")))?;
.map_err(|err| format!("Fuzz thread count not a number {err:?}!"))?;
}
_ => return Err(format!("Unknown option {arg}")),
}
@ -66,7 +66,7 @@ pub fn run() -> Result<(), String> {
Some("clones/rustlantis".as_ref()),
true,
)
.map_err(|err| (format!("Git clone failed with message: {err:?}!")))?;
.map_err(|err| format!("Git clone failed with message: {err:?}!"))?;
// Ensure that we are on the newest rustlantis commit.
let cmd: &[&dyn AsRef<OsStr>] = &[&"git", &"pull", &"origin"];

View file

@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2025-07-04"
channel = "nightly-2025-07-21"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"]

View file

@ -1,6 +1,6 @@
use gccjit::{Context, FunctionType, GlobalKind, ToRValue, Type};
#[cfg(feature = "master")]
use gccjit::{FnAttribute, VarAttribute};
use gccjit::FnAttribute;
use gccjit::{Context, FunctionType, RValue, ToRValue, Type};
use rustc_ast::expand::allocator::{
ALLOCATOR_METHODS, AllocatorKind, AllocatorTy, NO_ALLOC_SHIM_IS_UNSTABLE,
alloc_error_handler_name, default_fn_name, global_fn_name,
@ -71,15 +71,13 @@ pub(crate) unsafe fn codegen(
None,
);
let name = mangle_internal_symbol(tcx, OomStrategy::SYMBOL);
let global = context.new_global(None, GlobalKind::Exported, i8, name);
#[cfg(feature = "master")]
global.add_attribute(VarAttribute::Visibility(symbol_visibility_to_gcc(
tcx.sess.default_visibility(),
)));
let value = tcx.sess.opts.unstable_opts.oom.should_panic();
let value = context.new_rvalue_from_int(i8, value as i32);
global.global_set_initializer_rvalue(value);
create_const_value_function(
tcx,
context,
&mangle_internal_symbol(tcx, OomStrategy::SYMBOL),
i8,
context.new_rvalue_from_int(i8, tcx.sess.opts.unstable_opts.oom.should_panic() as i32),
);
create_wrapper_function(
tcx,
@ -91,6 +89,34 @@ pub(crate) unsafe fn codegen(
);
}
/// Emits an exported, zero-argument function named `name` whose body simply
/// returns the constant `value` of type `output`.
///
/// Used to expose compile-time constants (e.g. the OOM strategy flag) to the
/// linker as a callable symbol instead of a global variable — mirroring what
/// cg_llvm does for these internal runtime symbols.
fn create_const_value_function(
    tcx: TyCtxt<'_>,
    context: &Context<'_>,
    name: &str,
    output: Type<'_>,
    value: RValue<'_>,
) {
    let func = context.new_function(None, FunctionType::Exported, output, &[], name, false);
    #[cfg(feature = "master")]
    {
        // Match the session's default symbol visibility (only available with
        // the `master` feature of gccjit).
        func.add_attribute(FnAttribute::Visibility(symbol_visibility_to_gcc(
            tcx.sess.default_visibility(),
        )));
        // FIXME(antoyo): cg_llvm sets AlwaysInline, but AlwaysInline is different in GCC and using
        // it here will cause linking errors when using LTO.
        func.add_attribute(FnAttribute::Inline);
    }
    if tcx.sess.must_emit_unwind_tables() {
        // TODO(antoyo): emit unwind tables.
    }
    // Single basic block: just return the constant.
    let block = func.new_block("entry");
    block.end_with_return(None, value);
}
fn create_wrapper_function(
tcx: TyCtxt<'_>,
context: &Context<'_>,

View file

@ -87,7 +87,7 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
#[cfg_attr(not(feature = "master"), allow(unused_variables))] func: Function<'gcc>,
instance: ty::Instance<'tcx>,
) {
let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
let codegen_fn_attrs = cx.tcx.codegen_instance_attrs(instance.def);
#[cfg(feature = "master")]
{

View file

@ -24,7 +24,7 @@ use std::sync::Arc;
use gccjit::{Context, OutputKind};
use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::traits::*;
@ -176,7 +176,7 @@ pub(crate) fn run_fat(
cgcx: &CodegenContext<GccCodegenBackend>,
modules: Vec<FatLtoInput<GccCodegenBackend>>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<GccCodegenBackend>, FatalError> {
) -> Result<ModuleCodegen<GccContext>, FatalError> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, dcx)?;
@ -201,7 +201,7 @@ fn fat_lto(
mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
tmp_path: TempDir,
//symbols_below_threshold: &[String],
) -> Result<LtoModuleCodegen<GccCodegenBackend>, FatalError> {
) -> Result<ModuleCodegen<GccContext>, FatalError> {
let _timer = cgcx.prof.generic_activity("GCC_fat_lto_build_monolithic_module");
info!("going for a fat lto");
@ -334,7 +334,7 @@ fn fat_lto(
// of now.
module.module_llvm.temp_dir = Some(tmp_path);
Ok(LtoModuleCodegen::Fat(module))
Ok(module)
}
pub struct ModuleBuffer(PathBuf);
@ -358,7 +358,7 @@ pub(crate) fn run_thin(
cgcx: &CodegenContext<GccCodegenBackend>,
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<LtoModuleCodegen<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
) -> Result<(Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, dcx)?;
@ -427,7 +427,7 @@ fn thin_lto(
tmp_path: TempDir,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
//_symbols_below_threshold: &[String],
) -> Result<(Vec<LtoModuleCodegen<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
) -> Result<(Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
info!("going for that thin, thin LTO");
@ -573,8 +573,7 @@ fn thin_lto(
}*/
info!(" - {}: re-compiled", module_name);
opt_jobs
.push(LtoModuleCodegen::Thin(ThinModule { shared: shared.clone(), idx: module_index }));
opt_jobs.push(ThinModule { shared: shared.clone(), idx: module_index });
}
// Save the current ThinLTO import information for the next compilation

View file

@ -16,10 +16,12 @@ use crate::{GccCodegenBackend, GccContext};
pub(crate) fn codegen(
cgcx: &CodegenContext<GccCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
module: ModuleCodegen<GccContext>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name);
{
let context = &module.module_llvm.context;

View file

@ -539,9 +539,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn ret(&mut self, mut value: RValue<'gcc>) {
let expected_return_type = self.current_func().get_return_type();
if !expected_return_type.is_compatible_with(value.get_type()) {
// NOTE: due to opaque pointers now being used, we need to cast here.
value = self.context.new_cast(self.location, value, expected_return_type);
let value_type = value.get_type();
if !expected_return_type.is_compatible_with(value_type) {
// NOTE: due to opaque pointers now being used, we need to (bit)cast here.
if self.is_native_int_type(value_type) && self.is_native_int_type(expected_return_type)
{
value = self.context.new_cast(self.location, value, expected_return_type);
} else {
value = self.context.new_bitcast(self.location, value, expected_return_type);
}
}
self.llbb().end_with_return(self.location, value);
}
@ -926,10 +932,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
.get_address(self.location)
}
fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
unimplemented!();
}
fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
let block = self.llbb();
let function = block.get_function();
@ -1282,11 +1284,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn intcast(
&mut self,
value: RValue<'gcc>,
mut value: RValue<'gcc>,
dest_typ: Type<'gcc>,
_is_signed: bool,
is_signed: bool,
) -> RValue<'gcc> {
// NOTE: is_signed is for value, not dest_typ.
let value_type = value.get_type();
if is_signed && !value_type.is_signed(self.cx) {
let signed_type = value_type.to_signed(self.cx);
value = self.gcc_int_cast(value, signed_type);
} else if !is_signed && value_type.is_signed(self.cx) {
let unsigned_type = value_type.to_unsigned(self.cx);
value = self.gcc_int_cast(value, unsigned_type);
}
self.gcc_int_cast(value, dest_typ)
}

View file

@ -105,7 +105,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
let is_hidden = if is_generic {
// This is a monomorphization of a generic function.
if !(cx.tcx.sess.opts.share_generics()
|| tcx.codegen_fn_attrs(instance_def_id).inline
|| tcx.codegen_instance_attrs(instance.def).inline
== rustc_attr_data_structures::InlineAttr::Never)
{
// When not sharing generics, all instances are in the same

View file

@ -1,7 +1,6 @@
use gccjit::{LValue, RValue, ToRValue, Type};
use rustc_abi as abi;
use rustc_abi::HasDataLayout;
use rustc_abi::Primitive::Pointer;
use rustc_abi::{self as abi, HasDataLayout};
use rustc_codegen_ssa::traits::{
BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods,
};
@ -162,7 +161,7 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
}
fn const_usize(&self, i: u64) -> RValue<'gcc> {
let bit_size = self.data_layout().pointer_size.bits();
let bit_size = self.data_layout().pointer_size().bits();
if bit_size < 64 {
// make sure it doesn't overflow
assert!(i < (1 << bit_size));
@ -282,6 +281,13 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
let init = self.const_data_from_alloc(alloc);
self.static_addr_of(init, alloc.inner().align, None)
}
GlobalAlloc::TypeId { .. } => {
let val = self.const_usize(offset.bytes());
// This is still a variable of pointer type, even though we only use the provenance
// of that pointer in CTFE and Miri. But to make LLVM's type system happy,
// we need an int-to-ptr cast here (it doesn't matter at all which provenance that picks).
return self.context.new_cast(None, val, ty);
}
GlobalAlloc::Static(def_id) => {
assert!(self.tcx.is_static(def_id));
self.get_static(def_id).get_address(None)

View file

@ -294,7 +294,7 @@ pub(crate) fn const_alloc_to_gcc_uncached<'gcc>(
let alloc = alloc.inner();
let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
let dl = cx.data_layout();
let pointer_size = dl.pointer_size.bytes() as usize;
let pointer_size = dl.pointer_size().bytes() as usize;
let mut next_offset = 0;
for &(offset, prov) in alloc.provenance().ptrs().iter() {
@ -331,7 +331,7 @@ pub(crate) fn const_alloc_to_gcc_uncached<'gcc>(
),
abi::Scalar::Initialized {
value: Primitive::Pointer(address_space),
valid_range: WrappingRange::full(dl.pointer_size),
valid_range: WrappingRange::full(dl.pointer_size()),
},
cx.type_i8p_ext(address_space),
));

View file

@ -4,12 +4,15 @@
// cSpell:words cmpti divti modti mulodi muloti udivti umodti
use gccjit::{BinaryOp, ComparisonOp, FunctionType, Location, RValue, ToRValue, Type, UnaryOp};
use gccjit::{
BinaryOp, CType, ComparisonOp, FunctionType, Location, RValue, ToRValue, Type, UnaryOp,
};
use rustc_abi::{CanonAbi, Endian, ExternAbi};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeCodegenMethods, BuilderMethods, OverflowOp};
use rustc_middle::ty::{self, Ty};
use rustc_target::callconv::{ArgAbi, ArgAttributes, FnAbi, PassMode};
use rustc_type_ir::{Interner, TyKind};
use crate::builder::{Builder, ToGccComp};
use crate::common::{SignType, TypeReflection};
@ -167,9 +170,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
if a_type.is_vector() {
// Vector types need to be bitcast.
// TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
b = self.context.new_bitcast(self.location, b, a.get_type());
b = self.context.new_bitcast(self.location, b, a_type);
} else {
b = self.context.new_cast(self.location, b, a.get_type());
b = self.context.new_cast(self.location, b, a_type);
}
}
self.context.new_binary_op(self.location, operation, a_type, a, b)
@ -216,13 +219,22 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
operation_name: &str,
signed: bool,
a: RValue<'gcc>,
b: RValue<'gcc>,
mut b: RValue<'gcc>,
) -> RValue<'gcc> {
let a_type = a.get_type();
let b_type = b.get_type();
if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type))
|| (a_type.is_vector() && b_type.is_vector())
{
if !a_type.is_compatible_with(b_type) {
if a_type.is_vector() {
// Vector types need to be bitcast.
// TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
b = self.context.new_bitcast(self.location, b, a_type);
} else {
b = self.context.new_cast(self.location, b, a_type);
}
}
self.context.new_binary_op(self.location, operation, a_type, a, b)
} else {
debug_assert!(a_type.dyncast_array().is_some());
@ -351,6 +363,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
.new_local(self.location, rhs.get_type(), "binopResult")
.get_address(self.location);
let new_type = type_kind_to_gcc_type(new_kind);
let new_type = self.context.new_c_type(new_type);
let lhs = self.context.new_cast(self.location, lhs, new_type);
let rhs = self.context.new_cast(self.location, rhs, new_type);
let res = self.context.new_cast(self.location, res, new_type.make_pointer());
let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
(res.dereference(self.location).to_rvalue(), overflow)
}
@ -477,11 +494,27 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let lhs_low = self.context.new_cast(self.location, self.low(lhs), unsigned_type);
let rhs_low = self.context.new_cast(self.location, self.low(rhs), unsigned_type);
let mut lhs_high = self.high(lhs);
let mut rhs_high = self.high(rhs);
match op {
IntPredicate::IntUGT
| IntPredicate::IntUGE
| IntPredicate::IntULT
| IntPredicate::IntULE => {
lhs_high = self.context.new_cast(self.location, lhs_high, unsigned_type);
rhs_high = self.context.new_cast(self.location, rhs_high, unsigned_type);
}
// TODO(antoyo): we probably need to handle signed comparison for unsigned
// integers.
_ => (),
}
let condition = self.context.new_comparison(
self.location,
ComparisonOp::LessThan,
self.high(lhs),
self.high(rhs),
lhs_high,
rhs_high,
);
self.llbb().end_with_conditional(self.location, condition, block1, block2);
@ -495,8 +528,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let condition = self.context.new_comparison(
self.location,
ComparisonOp::GreaterThan,
self.high(lhs),
self.high(rhs),
lhs_high,
rhs_high,
);
block2.end_with_conditional(self.location, condition, block3, block4);
@ -620,7 +653,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
}
}
pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
pub fn gcc_xor(&self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
let a_type = a.get_type();
let b_type = b.get_type();
if a_type.is_vector() && b_type.is_vector() {
@ -628,6 +661,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
a ^ b
} else if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type)
{
if !a_type.is_compatible_with(b_type) {
b = self.context.new_cast(self.location, b, a_type);
}
a ^ b
} else {
self.concat_low_high_rvalues(
@ -1042,3 +1078,25 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
self.context.new_array_constructor(None, typ, &values)
}
}
/// Maps a rustc integer type kind (`i8`..`i128`, `u8`..`u128`) to the
/// corresponding gccjit C integer type.
///
/// Panics via `unimplemented!` for any non-fixed-width-integer kind
/// (including `isize`/`usize`, which are not matched here).
fn type_kind_to_gcc_type<I: Interner>(kind: TyKind<I>) -> CType {
    use rustc_middle::ty::IntTy::*;
    use rustc_middle::ty::UintTy::*;
    use rustc_middle::ty::{Int, Uint};

    match kind {
        Int(I8) => CType::Int8t,
        Int(I16) => CType::Int16t,
        Int(I32) => CType::Int32t,
        Int(I64) => CType::Int64t,
        Int(I128) => CType::Int128t,
        Uint(U8) => CType::UInt8t,
        Uint(U16) => CType::UInt16t,
        Uint(U32) => CType::UInt32t,
        Uint(U64) => CType::UInt64t,
        Uint(U128) => CType::UInt128t,
        _ => unimplemented!("Kind: {:?}", kind),
    }
}

View file

@ -505,7 +505,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
// For rusty ABIs, small aggregates are actually passed
// as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
// so we re-use that same threshold here.
layout.size() <= self.data_layout().pointer_size * 2
layout.size() <= self.data_layout().pointer_size() * 2
}
};
@ -889,10 +889,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// TODO(antoyo): use width?
let arg_type = arg.get_type();
let result_type = self.u32_type;
let arg = if arg_type.is_signed(self.cx) {
let new_type = arg_type.to_unsigned(self.cx);
self.gcc_int_cast(arg, new_type)
} else {
arg
};
let arg_type = arg.get_type();
let count_leading_zeroes =
// TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
// instead of using is_uint().
if arg_type.is_uint(self.cx) {
if arg_type.is_uchar(self.cx) || arg_type.is_ushort(self.cx) || arg_type.is_uint(self.cx) {
"__builtin_clz"
}
else if arg_type.is_ulong(self.cx) {

View file

@ -206,6 +206,28 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
);
}
#[cfg(feature = "master")]
if name == sym::simd_funnel_shl {
return Ok(simd_funnel_shift(
bx,
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
true,
));
}
#[cfg(feature = "master")]
if name == sym::simd_funnel_shr {
return Ok(simd_funnel_shift(
bx,
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
false,
));
}
if name == sym::simd_bswap {
return Ok(simd_bswap(bx, args[0].immediate()));
}
@ -1184,7 +1206,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let lhs = args[0].immediate();
let rhs = args[1].immediate();
let is_add = name == sym::simd_saturating_add;
let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
let ptr_bits = bx.tcx().data_layout.pointer_size().bits() as _;
let (signed, elem_width, elem_ty) = match *in_elem.kind() {
ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)),
ty::Uint(i) => {
@ -1434,3 +1456,62 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
unimplemented!("simd {}", name);
}
#[cfg(feature = "master")]
/// Lane-wise funnel shift for SIMD vectors (`simd_funnel_shl` /
/// `simd_funnel_shr`).
///
/// For each lane, the elements of `a` and `b` are concatenated into a
/// double-width unsigned integer (`a` in the high half, `b` in the low half),
/// which is then shifted by the per-lane amount from `shift`; `shift_left`
/// selects the direction. The high half (left shift) or the masked low half
/// (right shift) becomes the result lane, cast back to the element type.
///
/// Only 8/16/32/64-bit integer elements are supported; other element types
/// hit `unimplemented!`.
fn simd_funnel_shift<'a, 'gcc, 'tcx>(
    bx: &mut Builder<'a, 'gcc, 'tcx>,
    a: RValue<'gcc>,
    b: RValue<'gcc>,
    shift: RValue<'gcc>,
    shift_left: bool,
) -> RValue<'gcc> {
    use crate::common::SignType;

    let a_type = a.get_type();
    let vector_type = a_type.unqualified().dyncast_vector().expect("vector type");
    let num_units = vector_type.get_num_units();
    let elem_type = vector_type.get_element_type();
    // Pick the double-width working type, the bit offset of the high half,
    // and the mask that isolates the low (element-width) half.
    let (new_int_type, int_shift_val, int_mask) = if elem_type.is_compatible_with(bx.u8_type)
        || elem_type.is_compatible_with(bx.i8_type)
    {
        (bx.u16_type, 8, u8::MAX as u64)
    } else if elem_type.is_compatible_with(bx.u16_type) || elem_type.is_compatible_with(bx.i16_type)
    {
        (bx.u32_type, 16, u16::MAX as u64)
    } else if elem_type.is_compatible_with(bx.u32_type) || elem_type.is_compatible_with(bx.i32_type)
    {
        (bx.u64_type, 32, u32::MAX as u64)
    } else if elem_type.is_compatible_with(bx.u64_type) || elem_type.is_compatible_with(bx.i64_type)
    {
        (bx.u128_type, 64, u64::MAX)
    } else {
        unimplemented!("funnel shift on {:?}", elem_type);
    };
    let int_mask = bx.context.new_rvalue_from_long(new_int_type, int_mask as i64);
    let int_shift_val = bx.context.new_rvalue_from_int(new_int_type, int_shift_val);
    let mut elements = vec![];
    let unsigned_type = elem_type.to_unsigned(bx);
    for i in 0..num_units {
        let index = bx.context.new_rvalue_from_int(bx.int_type, i as i32);
        // Reinterpret each lane as unsigned before widening so the zero-extend
        // doesn't sign-extend negative values into the high bits.
        let a_val = bx.context.new_vector_access(None, a, index).to_rvalue();
        let a_val = bx.context.new_bitcast(None, a_val, unsigned_type);
        // TODO: we probably need to use gcc_int_cast instead.
        let a_val = bx.gcc_int_cast(a_val, new_int_type);
        let b_val = bx.context.new_vector_access(None, b, index).to_rvalue();
        let b_val = bx.context.new_bitcast(None, b_val, unsigned_type);
        let b_val = bx.gcc_int_cast(b_val, new_int_type);
        let shift_val = bx.context.new_vector_access(None, shift, index).to_rvalue();
        let shift_val = bx.gcc_int_cast(shift_val, new_int_type);
        // Concatenate: `a` occupies the high element-width bits, `b` the low.
        let mut val = a_val << int_shift_val | b_val;
        if shift_left {
            // Funnel-shift left: shift the pair left, then take the high half.
            val = (val << shift_val) >> int_shift_val;
        } else {
            // Funnel-shift right: shift the pair right, then keep the low half.
            val = (val >> shift_val) & int_mask;
        }
        let val = bx.gcc_int_cast(val, elem_type);
        elements.push(val);
    }
    bx.context.new_rvalue_from_vector(None, a_type, &elements)
}

View file

@ -19,19 +19,15 @@
#![doc(rust_logo)]
#![feature(rustdoc_internals)]
#![feature(rustc_private)]
#![allow(broken_intra_doc_links)]
#![recursion_limit = "256"]
#![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)]
#![deny(clippy::pattern_type_mismatch)]
#![allow(clippy::needless_lifetimes, clippy::uninlined_format_args)]
// Some "regular" crates we want to share with rustc
extern crate object;
// These crates are pulled from the sysroot because they are part of
// rustc's public API, so we need to ensure version compatibility.
extern crate smallvec;
// FIXME(antoyo): clippy bug: remove the #[allow] when it's fixed.
#[allow(unused_extern_crates)]
extern crate tempfile;
#[macro_use]
extern crate tracing;
@ -55,6 +51,7 @@ extern crate rustc_session;
extern crate rustc_span;
extern crate rustc_symbol_mangling;
extern crate rustc_target;
extern crate rustc_type_ir;
// This prevents duplicating functions and statics that are already part of the host rustc process.
#[allow(unused_extern_crates)]
@ -97,7 +94,7 @@ use gccjit::{CType, Context, OptimizationLevel};
use gccjit::{TargetInfo, Version};
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_ast::expand::autodiff_attrs::AutoDiffItem;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{
CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn,
};
@ -360,11 +357,16 @@ impl WriteBackendMethods for GccCodegenBackend {
type ThinData = ThinData;
type ThinBuffer = ThinBuffer;
fn run_fat_lto(
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<FatLtoInput<Self>>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<Self>, FatalError> {
diff_functions: Vec<AutoDiffItem>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
if !diff_functions.is_empty() {
unimplemented!();
}
back::lto::run_fat(cgcx, modules, cached_modules)
}
@ -372,7 +374,7 @@ impl WriteBackendMethods for GccCodegenBackend {
cgcx: &CodegenContext<Self>,
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError> {
back::lto::run_thin(cgcx, modules, cached_modules)
}
@ -394,14 +396,6 @@ impl WriteBackendMethods for GccCodegenBackend {
Ok(())
}
fn optimize_fat(
_cgcx: &CodegenContext<Self>,
_module: &mut ModuleCodegen<Self::Module>,
) -> Result<(), FatalError> {
// TODO(antoyo)
Ok(())
}
fn optimize_thin(
cgcx: &CodegenContext<Self>,
thin: ThinModule<Self>,
@ -411,11 +405,10 @@ impl WriteBackendMethods for GccCodegenBackend {
fn codegen(
cgcx: &CodegenContext<Self>,
dcx: DiagCtxtHandle<'_>,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
back::write::codegen(cgcx, dcx, module, config)
back::write::codegen(cgcx, module, config)
}
fn prepare_thin(
@ -436,15 +429,6 @@ impl WriteBackendMethods for GccCodegenBackend {
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
back::write::link(cgcx, dcx, modules)
}
fn autodiff(
_cgcx: &CodegenContext<Self>,
_module: &ModuleCodegen<Self::Module>,
_diff_functions: Vec<AutoDiffItem>,
_config: &ModuleConfig,
) -> Result<(), FatalError> {
unimplemented!()
}
}
/// This is the entrypoint for a hot plugged rustc_codegen_gccjit

View file

@ -53,7 +53,7 @@ impl<'gcc, 'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
self.linkage.set(base::linkage_to_gcc(linkage));
let decl = self.declare_fn(symbol_name, fn_abi);
//let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
//let attrs = self.tcx.codegen_instance_attrs(instance.def);
attributes::from_fn_attrs(self, decl, instance);

View file

@ -28,6 +28,6 @@ tests/ui/macros/macro-comma-behavior-rpass.rs
tests/ui/macros/rfc-2011-nicer-assert-messages/assert-with-custom-errors-does-not-create-unnecessary-code.rs
tests/ui/macros/rfc-2011-nicer-assert-messages/feature-gate-generic_assert.rs
tests/ui/macros/stringify.rs
tests/ui/reexport-test-harness-main.rs
tests/ui/rfcs/rfc-1937-termination-trait/termination-trait-in-test.rs
tests/ui/binding/fn-arg-incomplete-pattern-drop-order.rs
tests/ui/lto/debuginfo-lto-alloc.rs

View file

@ -6,7 +6,6 @@ tests/run-make/doctests-keep-binaries/
tests/run-make/doctests-runtool/
tests/run-make/emit-shared-files/
tests/run-make/exit-code/
tests/run-make/issue-22131/
tests/run-make/issue-64153/
tests/run-make/llvm-ident/
tests/run-make/native-link-modifier-bundle/

View file

@ -10,11 +10,10 @@ tests/ui/iterators/iter-sum-overflow-overflow-checks.rs
tests/ui/mir/mir_drop_order.rs
tests/ui/mir/mir_let_chains_drop_order.rs
tests/ui/mir/mir_match_guard_let_chains_drop_order.rs
tests/ui/oom_unwind.rs
tests/ui/panics/oom-panic-unwind.rs
tests/ui/panic-runtime/abort-link-to-unwinding-crates.rs
tests/ui/panic-runtime/abort.rs
tests/ui/panic-runtime/link-to-abort.rs
tests/ui/unwind-no-uwtable.rs
tests/ui/parser/unclosed-delimiter-in-dep.rs
tests/ui/consts/missing_span_in_backtrace.rs
tests/ui/drop/dynamic-drop.rs
@ -82,3 +81,6 @@ tests/ui/coroutine/panic-drops.rs
tests/ui/coroutine/panic-safe.rs
tests/ui/process/nofile-limit.rs
tests/ui/simd/intrinsic/generic-arithmetic-pass.rs
tests/ui/linking/no-gc-encapsulation-symbols.rs
tests/ui/panics/unwind-force-no-unwind-tables.rs
tests/ui/attributes/fn-align-dyn.rs

View file

@ -8,6 +8,7 @@ clzll
cmse
codegened
csky
ctfe
ctlz
ctpop
cttz
@ -25,6 +26,7 @@ fwrapv
gimple
hrtb
immediates
interner
liblto
llbb
llcx
@ -47,6 +49,7 @@ mavx
mcmodel
minimumf
minnumf
miri
monomorphization
monomorphizations
monomorphized