Merge pull request #469 from tempdragon/master

Clippy-related fixes
antoyo 2024-03-16 10:34:22 -04:00 committed by GitHub
commit 7ff5d39980
18 changed files with 160 additions and 158 deletions

View file

@ -92,7 +92,7 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
let mut function_features = function_features
.iter()
.flat_map(|feat| to_gcc_features(cx.tcx.sess, feat).into_iter())
.chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
.chain(codegen_fn_attrs.instruction_set.iter().map(|x| match *x {
InstructionSetAttr::ArmA32 => "-thumb-mode", // TODO(antoyo): support removing feature.
InstructionSetAttr::ArmT32 => "thumb-mode",
}))
@ -118,8 +118,8 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
if feature.starts_with('-') {
Some(format!("no{}", feature))
} else if feature.starts_with('+') {
Some(feature[1..].to_string())
} else if let Some(stripped) = feature.strip_prefix('+') {
Some(stripped.to_string())
} else {
Some(feature.to_string())
}
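
For context on the hunk above: clippy's manual_strip lint flags indexing like feature[1..] behind a starts_with check and suggests strip_prefix, which returns an Option and cannot panic on an empty string. A minimal standalone sketch of the pattern (the function name is illustrative, not from this codebase):

    fn strip_plus(feature: &str) -> &str {
        // strip_prefix returns Some(rest) only when the prefix is present,
        // so no separate starts_with('+') check or manual slice is needed.
        feature.strip_prefix('+').unwrap_or(feature)
    }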

View file

@ -128,8 +128,7 @@ fn prepare_lto(
}
let archive_data = unsafe {
Mmap::map(File::open(&path).expect("couldn't open rlib"))
.expect("couldn't map rlib")
Mmap::map(File::open(path).expect("couldn't open rlib")).expect("couldn't map rlib")
};
let archive = ArchiveFile::parse(&*archive_data).expect("wanted an rlib");
let obj_files = archive
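
The change from File::open(&path) to File::open(path) is the usual needless_borrow fix: open is generic over AsRef<Path>, so the extra reference adds nothing. A small sketch of the same call shape:

    use std::fs::File;
    use std::io;
    use std::path::Path;

    fn open_rlib(path: &Path) -> io::Result<File> {
        // `path` already satisfies AsRef<Path>; `File::open(&path)` would
        // only wrap it in another layer of reference.
        File::open(path)
    }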

View file

@ -104,7 +104,7 @@ pub(crate) unsafe fn codegen(
// FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
// transmuting an rvalue to an lvalue.
// Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
context.dump_reproducer_to_file(format!("/tmp/reproducers/{}.c", module.name));
println!("Dumped reproducer {}", module.name);
}
if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") {

View file

@ -135,7 +135,7 @@ pub fn compile_codegen_unit(
let target_cpu = gcc_util::target_cpu(tcx.sess);
if target_cpu != "generic" {
context.add_command_line_option(&format!("-march={}", target_cpu));
context.add_command_line_option(format!("-march={}", target_cpu));
}
if tcx

View file

@ -225,7 +225,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let mut on_stack_param_indices = FxHashSet::default();
if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) {
on_stack_param_indices = indices.clone();
on_stack_param_indices.clone_from(indices);
}
if all_args_match {
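
Replacing `on_stack_param_indices = indices.clone()` with clone_from is the fix clippy's assigning_clones lint suggests: clone_from can reuse the destination's existing allocation instead of building a fresh collection and dropping the old one. A minimal sketch with std's HashSet (the real code uses FxHashSet, which exposes the same Clone API):

    use std::collections::HashSet;

    fn refresh(dst: &mut HashSet<usize>, src: &HashSet<usize>) {
        // Equivalent to `*dst = src.clone()`, but may reuse dst's buffers.
        dst.clone_from(src);
    }
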
@ -256,8 +256,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
actual_val.dereference(self.location).to_rvalue()
} else {
assert!(
!((actual_ty.is_vector() && !expected_ty.is_vector())
|| (!actual_ty.is_vector() && expected_ty.is_vector())),
(!expected_ty.is_vector() || actual_ty.is_vector())
&& (expected_ty.is_vector() || !actual_ty.is_vector()),
"{:?} ({}) -> {:?} ({}), index: {:?}[{}]",
actual_ty,
actual_ty.is_vector(),
@ -277,8 +277,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
.collect();
// NOTE: to take into account variadic functions.
for i in casted_args.len()..args.len() {
casted_args.push(args[i]);
for arg in args.iter().skip(casted_args.len()) {
casted_args.push(*arg);
}
Cow::Owned(casted_args)
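
The variadic-argument loop just above is rewritten from the index-based `for i in casted_args.len()..args.len()` form to an iterator, which is what clippy's needless_range_loop asks for. Roughly (element types are simplified for the example):

    fn append_extra(casted_args: &mut Vec<u32>, args: &[u32]) {
        // Skip the arguments that were already cast and push the rest as-is.
        for arg in args.iter().skip(casted_args.len()) {
            casted_args.push(*arg);
        }
    }
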
@ -353,7 +353,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let function_address_names = self.function_address_names.borrow();
let original_function_name = function_address_names.get(&func_ptr);
llvm::adjust_intrinsic_arguments(
&self,
self,
gcc_func,
args.into(),
&func_name,
@ -361,7 +361,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
)
};
let args_adjusted = args.len() != previous_arg_count;
let args = self.check_ptr_call("call", func_ptr, &*args);
let args = self.check_ptr_call("call", func_ptr, &args);
// gccjit requires to use the result of functions, even when it's not used.
// That's why we assign the result to a local or call add_eval().
@ -373,7 +373,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
unsafe { RETURN_VALUE_COUNT += 1 };
let return_value = self.cx.context.new_call_through_ptr(self.location, func_ptr, &args);
let return_value = llvm::adjust_intrinsic_return_value(
&self,
self,
return_value,
&func_name,
&args,
@ -441,7 +441,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
self.block.add_assignment(
self.location,
result,
self.cx.context.new_call(self.location, func, &args),
self.cx.context.new_call(self.location, func, args),
);
result.to_rvalue()
}
@ -595,7 +595,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
) -> RValue<'gcc> {
let try_block = self.current_func().new_block("try");
let current_block = self.block.clone();
let current_block = self.block;
self.block = try_block;
let call = self.call(typ, fn_attrs, None, func, args, None); // TODO(antoyo): use funclet here?
self.block = current_block;
@ -1176,7 +1176,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
// NOTE: due to opaque pointers now being used, we need to cast here.
let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer());
// NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified).
let mut indices = indices.into_iter();
let mut indices = indices.iter();
let index = indices.next().expect("first index in inbounds_gep");
let mut result = self.context.new_array_access(self.location, ptr, *index);
for index in indices {
@ -1684,7 +1684,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
// FIXME(antoyo): this does not zero-extend.
if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
if value.get_type().is_bool() && dest_typ.is_i8(self.cx) {
// FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
// Fix the code in codegen_ssa::base::from_immediate.
return value;
@ -2057,7 +2057,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
self.context.new_rvalue_from_vector(self.location, mask_type, &vector_elements);
let shifted = self.context.new_rvalue_vector_perm(self.location, res, res, mask);
shift *= 2;
res = op(res, shifted, &self.context);
res = op(res, shifted, self.context);
}
self.context
.new_vector_access(self.location, res, self.context.new_rvalue_zero(self.int_type))
@ -2073,7 +2073,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
}
pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> {
let loc = self.location.clone();
let loc = self.location;
self.vector_reduce(src, |a, b, context| context.new_binary_op(loc, op, a.get_type(), a, b))
}
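
Several hunks in this file replace `self.location.clone()` and `self.block.clone()` with plain reads; the handles are Copy (which is why the clones can simply be dropped), so clippy's clone_on_copy treats the explicit clone as noise. Illustrative sketch with a stand-in Copy type:

    #[derive(Clone, Copy)]
    struct Loc {
        line: u32,
        col: u32,
    }

    fn remember(loc: Loc) -> (u32, u32) {
        // For a Copy type, a plain read already copies; `.clone()` adds nothing.
        let saved = loc;
        (saved.line, saved.col)
    }
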
@ -2090,7 +2090,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
let element_count = vector_type.get_num_units();
(0..element_count)
.into_iter()
.map(|i| {
self.context
.new_vector_access(
@ -2121,7 +2120,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
let element_count = vector_type.get_num_units();
(0..element_count)
.into_iter()
.map(|i| {
self.context
.new_vector_access(
@ -2141,7 +2139,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// Inspired by Hacker's Delight min implementation.
pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
let loc = self.location.clone();
let loc = self.location;
self.vector_reduce(src, |a, b, context| {
let differences_or_zeros = difference_or_zero(loc, a, b, context);
context.new_binary_op(loc, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
@ -2150,7 +2148,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// Inspired by Hacker's Delight max implementation.
pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
let loc = self.location.clone();
let loc = self.location;
self.vector_reduce(src, |a, b, context| {
let differences_or_zeros = difference_or_zero(loc, a, b, context);
context.new_binary_op(loc, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
@ -2345,7 +2343,7 @@ impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
fn target_spec(&self) -> &Target {
&self.cx.target_spec()
self.cx.target_spec()
}
}
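
The two `(0..element_count).into_iter()` hunks above drop a redundant into_iter call: a Range is already an Iterator, which is what clippy's useless_conversion points out. For example:

    fn lane_offsets(element_count: usize, lane_size: usize) -> Vec<usize> {
        // `0..element_count` can be mapped over directly; `.into_iter()` on it
        // is an identity conversion.
        (0..element_count).map(|i| i * lane_size).collect()
    }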

View file

@ -28,7 +28,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
let func = if let Some(_func) = cx.get_declared_value(&sym) {
let func = if let Some(_func) = cx.get_declared_value(sym) {
// FIXME(antoyo): we never reach this because get_declared_value only returns global variables
// and here we try to get a function.
unreachable!();
@ -68,7 +68,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
}*/
} else {
cx.linkage.set(FunctionType::Extern);
let func = cx.declare_fn(&sym, &fn_abi);
let func = cx.declare_fn(sym, fn_abi);
attributes::from_fn_attrs(cx, func, instance);

View file

@ -21,7 +21,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
fn global_string(&self, string: &str) -> LValue<'gcc> {
// TODO(antoyo): handle non-null-terminated strings.
let string = self.context.new_string_literal(&*string);
let string = self.context.new_string_literal(string);
let sym = self.generate_local_symbol_name("str");
let global = self.declare_private_global(&sym, self.val_ty(string));
global.global_set_initializer_rvalue(string);
@ -170,7 +170,8 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
return self
.context
.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
} else if ty == self.double_type {
}
if ty == self.double_type {
return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
}
@ -293,7 +294,7 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
} else if self.is_ulonglong(cx) {
cx.longlong_type
} else {
self.clone()
*self
}
}
@ -319,7 +320,7 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
} else if self.is_longlong(cx) {
cx.ulonglong_type
} else {
self.clone()
*self
}
}
}
@ -432,7 +433,7 @@ impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
}
fn is_vector(&self) -> bool {
let mut typ = self.clone();
let mut typ = *self;
loop {
if typ.dyncast_vector().is_some() {
return true;
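
These hunks swap `self.clone()` for `*self` where the receiver is a Copy handle (gccjit's Type). Dereferencing is the idiomatic way to return the value itself; a sketch with a stand-in newtype:

    #[derive(Clone, Copy, PartialEq)]
    struct TypeHandle(u32);

    impl TypeHandle {
        fn to_signed(&self) -> TypeHandle {
            // Nothing to convert in this illustrative case: return a copy of
            // the handle with `*self` rather than `self.clone()`.
            *self
        }
    }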

View file

@ -66,7 +66,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
let attrs = self.tcx.codegen_fn_attrs(def_id);
let value = match codegen_static_initializer(&self, def_id) {
let value = match codegen_static_initializer(self, def_id) {
Ok((value, _)) => value,
// Error has already been reported
Err(_) => return,
@ -231,13 +231,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
let global = self.declare_global(
&sym,
llty,
GlobalKind::Exported,
is_tls,
fn_attrs.link_section,
);
let global =
self.declare_global(sym, llty, GlobalKind::Exported, is_tls, fn_attrs.link_section);
if !self.tcx.is_reachable_non_generic(def_id) {
#[cfg(feature = "master")]
@ -246,7 +241,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
global
} else {
check_and_apply_linkage(&self, &fn_attrs, ty, sym)
check_and_apply_linkage(self, fn_attrs, ty, sym)
};
if !def_id.is_local() {
@ -367,11 +362,8 @@ fn check_and_apply_linkage<'gcc, 'tcx>(
let gcc_type = cx.layout_of(ty).gcc_type(cx);
if let Some(linkage) = attrs.import_linkage {
// Declare a symbol `foo` with the desired linkage.
let global1 = cx.declare_global_with_linkage(
&sym,
cx.type_i8(),
base::global_linkage_to_gcc(linkage),
);
let global1 =
cx.declare_global_with_linkage(sym, cx.type_i8(), base::global_linkage_to_gcc(linkage));
// Declare an internal global `extern_with_linkage_foo` which
// is initialized with the address of `foo`. If `foo` is
@ -380,7 +372,7 @@ fn check_and_apply_linkage<'gcc, 'tcx>(
// `extern_with_linkage_foo` will instead be initialized to
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&sym);
real_name.push_str(sym);
let global2 = cx.define_global(&real_name, gcc_type, is_tls, attrs.link_section);
// TODO(antoyo): set linkage.
let value = cx.const_ptrcast(global1.get_address(None), gcc_type);
@ -397,6 +389,6 @@ fn check_and_apply_linkage<'gcc, 'tcx>(
// don't do this then linker errors can be generated where the linker
// complains that one object files has a thread local version of the
// symbol and another one doesn't.
cx.declare_global(&sym, gcc_type, GlobalKind::Imported, is_tls, attrs.link_section)
cx.declare_global(sym, gcc_type, GlobalKind::Imported, is_tls, attrs.link_section)
}
}

View file

@ -384,7 +384,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
}
pub fn sess(&self) -> &'tcx Session {
&self.tcx.sess
self.tcx.sess
}
pub fn bitcast_if_needed(
@ -431,7 +431,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let func_name = self.tcx.symbol_name(instance).name;
let func = if self.intrinsics.borrow().contains_key(func_name) {
self.intrinsics.borrow()[func_name].clone()
self.intrinsics.borrow()[func_name]
} else {
get_fn(self, instance)
};
@ -485,7 +485,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let symbol_name = tcx.symbol_name(instance).name;
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
self.linkage.set(FunctionType::Extern);
let func = self.declare_fn(symbol_name, &fn_abi);
let func = self.declare_fn(symbol_name, fn_abi);
let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
func
}
@ -505,7 +505,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
fn sess(&self) -> &Session {
&self.tcx.sess
self.tcx.sess
}
fn check_overflow(&self) -> bool {
@ -612,7 +612,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
// user defined names
let mut name = String::with_capacity(prefix.len() + 6);
name.push_str(prefix);
name.push_str(".");
name.push('.');
base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
name
}
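
The change from name.push_str(".") to name.push('.') is the single_char_add_str fix: pushing a char avoids going through a one-byte string slice. In isolation:

    fn local_symbol_name(prefix: &str, idx: u32) -> String {
        let mut name = String::with_capacity(prefix.len() + 6);
        name.push_str(prefix);
        // A single character is pushed as a char, not as a &str of length one.
        name.push('.');
        name.push_str(&idx.to_string());
        name
    }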

View file

@ -91,7 +91,7 @@ fn compute_mir_scopes<'gcc, 'tcx>(
/// FIXME(tempdragon/?): Add Scope Support Here.
fn make_mir_scope<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
instance: Instance<'tcx>,
_instance: Instance<'tcx>,
mir: &Body<'tcx>,
variables: &Option<BitSet<SourceScope>>,
debug_context: &mut FunctionDebugContext<'tcx, (), Location<'gcc>>,
@ -104,7 +104,7 @@ fn make_mir_scope<'gcc, 'tcx>(
let scope_data = &mir.source_scopes[scope];
let parent_scope = if let Some(parent) = scope_data.parent_scope {
make_mir_scope(cx, instance, mir, variables, debug_context, instantiated, parent);
make_mir_scope(cx, _instance, mir, variables, debug_context, instantiated, parent);
debug_context.scopes[parent]
} else {
// The root is the function itself.
@ -118,7 +118,7 @@ fn make_mir_scope<'gcc, 'tcx>(
return;
};
if let Some(vars) = variables {
if let Some(ref vars) = *variables {
if !vars.contains(scope) && scope_data.inlined.is_none() {
// Do not create a DIScope if there are no variables defined in this
// MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
@ -136,8 +136,14 @@ fn make_mir_scope<'gcc, 'tcx>(
let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
// FIXME(eddyb) this doesn't account for the macro-related
// `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
let callsite_scope = parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
cx.dbg_loc(callsite_scope, parent_scope.inlined_at, callsite_span)
// TODO(tempdragon): Add scope support and then revert to cg_llvm version of this closure
// NOTE: These variables passed () here.
// Changed to comply to clippy.
/* let callsite_scope = */
parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
cx.dbg_loc(/* callsite_scope */ (), parent_scope.inlined_at, callsite_span)
});
let p_inlined_at = parent_scope.inlined_at;
// TODO(tempdragon): dbg_scope: Add support for scope extension here.
@ -225,7 +231,7 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
file_end_pos: BytePos(0),
};
let mut fn_debug_context = FunctionDebugContext {
scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes.as_slice()),
scopes: IndexVec::from_elem(empty_scope, mir.source_scopes.as_slice()),
inlined_function_scopes: Default::default(),
};
@ -274,16 +280,19 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
) -> Self::DILocation {
let pos = span.lo();
let DebugLoc { file, line, col } = self.lookup_debug_loc(pos);
let loc = match &file.name {
rustc_span::FileName::Real(name) => match name {
rustc_span::RealFileName::LocalPath(name) => {
let loc = match file.name {
rustc_span::FileName::Real(ref name) => match *name {
rustc_span::RealFileName::LocalPath(ref name) => {
if let Some(name) = name.to_str() {
self.context.new_location(name, line as i32, col as i32)
} else {
Location::null()
}
}
rustc_span::RealFileName::Remapped { local_path, virtual_name: _ } => {
rustc_span::RealFileName::Remapped {
ref local_path,
virtual_name: ref _unused,
} => {
if let Some(name) = local_path.as_ref() {
if let Some(name) = name.to_str() {
self.context.new_location(name, line as i32, col as i32)
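
The match rewrites in this file (matching on `file.name` and `*name` with explicit `ref` bindings) follow from the crate-level #![deny(clippy::pattern_type_mismatch)] visible near the end of this diff: instead of matching a reference against reference patterns, the scrutinee is dereferenced and bindings opt into borrowing. A reduced sketch (FileName here is a stand-in enum, not the rustc type):

    enum FileName {
        Real(String),
        Virtual(String),
    }

    fn as_str(name: &FileName) -> &str {
        // Deref the scrutinee once and take `ref` bindings, so pattern types
        // and value types line up the way the lint expects.
        match *name {
            FileName::Real(ref s) | FileName::Virtual(ref s) => s,
        }
    }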

View file

@ -35,7 +35,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
let name = self.generate_local_symbol_name("global");
self.context.new_global(None, GlobalKind::Internal, ty, &name)
self.context.new_global(None, GlobalKind::Internal, ty, name)
}
pub fn declare_global_with_linkage(
@ -176,16 +176,14 @@ fn declare_raw_fn<'gcc>(
cx.functions.borrow()[name]
} else {
let params: Vec<_> = param_types
.into_iter()
.iter()
.enumerate()
.map(|(index, param)| {
cx.context.new_parameter(None, *param, &format!("param{}", index))
}) // TODO(antoyo): set name.
.map(|(index, param)| cx.context.new_parameter(None, *param, format!("param{}", index))) // TODO(antoyo): set name.
.collect();
#[cfg(not(feature = "master"))]
let name = mangle_name(name);
let name = &mangle_name(name);
let func =
cx.context.new_function(None, cx.linkage.get(), return_type, &params, &name, variadic);
cx.context.new_function(None, cx.linkage.get(), return_type, &params, name, variadic);
cx.functions.borrow_mut().insert(name.to_string(), func);
#[cfg(feature = "master")]
@ -200,10 +198,10 @@ fn declare_raw_fn<'gcc>(
// create a wrapper function that calls rust_eh_personality.
let params: Vec<_> = param_types
.into_iter()
.iter()
.enumerate()
.map(|(index, param)| {
cx.context.new_parameter(None, *param, &format!("param{}", index))
cx.context.new_parameter(None, *param, format!("param{}", index))
}) // TODO(antoyo): set name.
.collect();
let gcc_func = cx.context.new_function(

View file

@ -2,8 +2,6 @@
//! This module exists because some integer types are not supported on some gcc platforms, e.g.
//! 128-bit integers on 32-bit platforms and thus require to be handled manually.
use std::convert::TryFrom;
use gccjit::{BinaryOp, ComparisonOp, FunctionType, Location, RValue, ToRValue, Type, UnaryOp};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
@ -40,7 +38,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
self.cx.context.new_unary_op(self.location, operation, typ, a)
} else {
let element_type = typ.dyncast_array().expect("element type");
self.from_low_high_rvalues(
self.concat_low_high_rvalues(
typ,
self.cx.context.new_unary_op(
self.location,
@ -114,7 +112,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let shift_value = self.gcc_sub(b, sixty_four);
let high = self.high(a);
let sign = if a_type.is_signed(self) { high >> sixty_three } else { zero };
let array_value = self.from_low_high_rvalues(a_type, high >> shift_value, sign);
let array_value = self.concat_low_high_rvalues(a_type, high >> shift_value, sign);
then_block.add_assignment(self.location, result, array_value);
then_block.end_with_jump(self.location, after_block);
@ -126,12 +124,15 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let shift_value = self.gcc_sub(sixty_four, b);
// NOTE: cast low to its unsigned type in order to perform a logical right shift.
let unsigned_type = native_int_type.to_unsigned(&self.cx);
let unsigned_type = native_int_type.to_unsigned(self.cx);
let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type);
let shifted_low = casted_low >> self.context.new_cast(self.location, b, unsigned_type);
let shifted_low = self.context.new_cast(self.location, shifted_low, native_int_type);
let array_value =
self.from_low_high_rvalues(a_type, (high << shift_value) | shifted_low, high >> b);
let array_value = self.concat_low_high_rvalues(
a_type,
(high << shift_value) | shifted_low,
high >> b,
);
actual_else_block.add_assignment(self.location, result, array_value);
actual_else_block.end_with_jump(self.location, after_block);
@ -255,10 +256,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
let new_kind = match typ.kind() {
let new_kind = match *typ.kind() {
Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
t @ (Uint(_) | Int(_)) => t.clone(),
t @ (Uint(_) | Int(_)) => t,
_ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
};
@ -344,7 +345,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
}
};
let intrinsic = self.context.get_builtin_function(&name);
let intrinsic = self.context.get_builtin_function(name);
let res = self
.current_func()
// TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
@ -454,7 +455,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let native_int_type = a_type.dyncast_array().expect("get element type");
// NOTE: cast low to its unsigned type in order to perform a comparison correctly (e.g.
// the sign is only on high).
let unsigned_type = native_int_type.to_unsigned(&self.cx);
let unsigned_type = native_int_type.to_unsigned(self.cx);
let lhs_low = self.context.new_cast(self.location, self.low(lhs), unsigned_type);
let rhs_low = self.context.new_cast(self.location, self.low(rhs), unsigned_type);
@ -589,7 +590,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
| IntPredicate::IntULT
| IntPredicate::IntULE => {
if !a_type.is_vector() {
let unsigned_type = a_type.to_unsigned(&self.cx);
let unsigned_type = a_type.to_unsigned(self.cx);
lhs = self.context.new_cast(self.location, lhs, unsigned_type);
rhs = self.context.new_cast(self.location, rhs, unsigned_type);
}
@ -612,7 +613,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
{
a ^ b
} else {
self.from_low_high_rvalues(
self.concat_low_high_rvalues(
a_type,
self.low(a) ^ self.low(b),
self.high(a) ^ self.high(b),
@ -661,7 +662,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
self.llbb().end_with_conditional(self.location, condition, then_block, else_block);
let array_value =
self.from_low_high_rvalues(a_type, zero, self.low(a) << (b - sixty_four));
self.concat_low_high_rvalues(a_type, zero, self.low(a) << (b - sixty_four));
then_block.add_assignment(self.location, result, array_value);
then_block.end_with_jump(self.location, after_block);
@ -673,13 +674,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// NOTE: cast low to its unsigned type in order to perform a logical right shift.
// TODO(antoyo): adjust this ^ comment.
let unsigned_type = native_int_type.to_unsigned(&self.cx);
let unsigned_type = native_int_type.to_unsigned(self.cx);
let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type);
let shift_value = self.context.new_cast(self.location, sixty_four - b, unsigned_type);
let high_low =
self.context.new_cast(self.location, casted_low >> shift_value, native_int_type);
let array_value = self.from_low_high_rvalues(
let array_value = self.concat_low_high_rvalues(
a_type,
self.low(a) << b,
(self.high(a) << b) | high_low,
@ -708,7 +709,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// NOTE: we also need to swap the two elements here, in addition to swapping inside
// the elements themselves like done above.
return self.from_low_high_rvalues(arg_type, swapped_msb, swapped_lsb);
return self.concat_low_high_rvalues(arg_type, swapped_msb, swapped_lsb);
}
// TODO(antoyo): check if it's faster to use string literals and a
@ -727,10 +728,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
if self.is_native_int_type_or_bool(typ) {
self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
self.context.new_rvalue_from_long(typ, int)
} else {
// NOTE: set the sign in high.
self.from_low_high(typ, int, -(int.is_negative() as i64))
self.concat_low_high(typ, int, -(int.is_negative() as i64))
}
}
@ -740,10 +741,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let num = self.context.new_rvalue_from_long(self.u64_type, int as i64);
self.gcc_int_cast(num, typ)
} else if self.is_native_int_type_or_bool(typ) {
self.context
.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
self.context.new_rvalue_from_long(typ, int as i64)
} else {
self.from_low_high(typ, int as i64, 0)
self.concat_low_high(typ, int as i64, 0)
}
}
@ -760,7 +760,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let shift = high << sixty_four;
shift | self.context.new_cast(None, low, typ)
} else {
self.from_low_high(typ, low as i64, high as i64)
self.concat_low_high(typ, low as i64, high as i64)
}
} else if typ.is_i128(self) {
// FIXME(antoyo): libgccjit cannot create 128-bit values yet.
@ -775,7 +775,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
if self.is_native_int_type_or_bool(typ) {
self.context.new_rvalue_zero(typ)
} else {
self.from_low_high(typ, 0, 0)
self.concat_low_high(typ, 0, 0)
}
}
@ -813,7 +813,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
"both types should either be native or non-native for or operation"
);
let native_int_type = a_type.dyncast_array().expect("get element type");
self.from_low_high_rvalues(
self.concat_low_high_rvalues(
a_type,
self.context.new_binary_op(
loc,
@ -858,7 +858,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let is_negative =
self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
self.from_low_high_rvalues(
self.concat_low_high_rvalues(
dest_typ,
self.context.new_cast(None, value, dest_element_type),
self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
@ -978,7 +978,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
.to_rvalue()
}
fn from_low_high_rvalues(
fn concat_low_high_rvalues(
&self,
typ: Type<'gcc>,
low: RValue<'gcc>,
@ -993,7 +993,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
self.context.new_array_constructor(None, typ, &values)
}
fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
fn concat_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
let (first, last) = match self.sess().target.options.endian {
Endian::Little => (low, high),
Endian::Big => (high, low),
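
The rename from from_low_high(_rvalues) to concat_low_high(_rvalues) is most likely driven by clippy's wrong_self_convention: a method named from_* is expected to be a constructor that does not take self, and these helpers take &self. A toy illustration of the renamed shape (types and fields are invented for the example):

    struct IntBuilder {
        big_endian: bool,
    }

    struct Wide {
        first: i64,
        second: i64,
    }

    impl IntBuilder {
        // Named concat_* rather than from_* because it is an ordinary &self
        // helper, not a constructor.
        fn concat_low_high(&self, low: i64, high: i64) -> Wide {
            if self.big_endian {
                Wide { first: high, second: low }
            } else {
                Wide { first: low, second: high }
            }
        }
    }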

View file

@ -15,7 +15,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
// Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing
// arguments here.
if gcc_func.get_param_count() != args.len() {
match &*func_name {
match func_name {
// NOTE: the following intrinsics have a different number of parameters in LLVM and GCC.
"__builtin_ia32_prold512_mask"
| "__builtin_ia32_pmuldq512_mask"
@ -380,7 +380,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
_ => (),
}
} else {
match &*func_name {
match func_name {
"__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => {
let new_args = args.to_vec();
let arg3_type = gcc_func.get_param_type(2);
@ -629,14 +629,11 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
#[cfg(feature = "master")]
pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
match name {
"llvm.prefetch" => {
let gcc_name = "__builtin_prefetch";
let func = cx.context.get_builtin_function(gcc_name);
cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
return func;
}
_ => (),
if name == "llvm.prefetch" {
let gcc_name = "__builtin_prefetch";
let func = cx.context.get_builtin_function(gcc_name);
cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
return func;
}
let gcc_name = match name {
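
The llvm.prefetch hunk collapses a one-armed match with a `_ => ()` fallthrough into a plain if, which is clippy's single_match suggestion (here expressed as an equality test). Schematically:

    fn builtin_name(name: &str) -> Option<&'static str> {
        // One meaningful arm plus an empty catch-all reads better as `if`.
        if name == "llvm.prefetch" {
            return Some("__builtin_prefetch");
        }
        None
    }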

View file

@ -91,7 +91,7 @@ fn get_simple_intrinsic<'gcc, 'tcx>(
sym::abort => "abort",
_ => return None,
};
Some(cx.context.get_builtin_function(&gcc_name))
Some(cx.context.get_builtin_function(gcc_name))
}
impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
@ -122,6 +122,11 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
let simple = get_simple_intrinsic(self, name);
// FIXME(tempdragon): Re-enable `clippy::suspicious_else_formatting` if the following issue is solved:
// https://github.com/rust-lang/rust-clippy/issues/12497
// and leave `else if use_integer_compare` to be placed "as is".
#[allow(clippy::suspicious_else_formatting)]
let llval = match name {
_ if simple.is_some() => {
// FIXME(antoyo): remove this cast when the API supports function.
@ -166,7 +171,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = fn_args.type_at(0);
let ptr = args[0].immediate();
let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
let load = if let PassMode::Cast { cast: ref ty, pad_i32: _ } = fn_abi.ret.mode {
let gcc_ty = ty.gcc_type(self);
self.volatile_load(gcc_ty, ptr)
} else {
@ -385,7 +390,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
};
if !fn_abi.ret.is_ignore() {
if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
if let PassMode::Cast { cast: ref ty, .. } = fn_abi.ret.mode {
let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
let ptr = self.pointercast(result.llval, ptr_llty);
self.store(llval, ptr, result.align);
@ -586,7 +591,7 @@ fn int_type_width_signed<'gcc, 'tcx>(
ty: Ty<'tcx>,
cx: &CodegenCx<'gcc, 'tcx>,
) -> Option<(u64, bool)> {
match ty.kind() {
match *ty.kind() {
ty::Int(t) => Some((
match t {
rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
@ -699,13 +704,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let count_leading_zeroes =
// TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
// instead of using is_uint().
if arg_type.is_uint(&self.cx) {
if arg_type.is_uint(self.cx) {
"__builtin_clz"
}
else if arg_type.is_ulong(&self.cx) {
else if arg_type.is_ulong(self.cx) {
"__builtin_clzl"
}
else if arg_type.is_ulonglong(&self.cx) {
else if arg_type.is_ulonglong(self.cx) {
"__builtin_clzll"
}
else if width == 128 {
@ -780,17 +785,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let (count_trailing_zeroes, expected_type) =
// TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
// instead of using is_uint().
if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
if arg_type.is_uchar(self.cx) || arg_type.is_ushort(self.cx) || arg_type.is_uint(self.cx) {
// NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
("__builtin_ctz", self.cx.uint_type)
}
else if arg_type.is_ulong(&self.cx) {
else if arg_type.is_ulong(self.cx) {
("__builtin_ctzl", self.cx.ulong_type)
}
else if arg_type.is_ulonglong(&self.cx) {
else if arg_type.is_ulonglong(self.cx) {
("__builtin_ctzll", self.cx.ulonglong_type)
}
else if arg_type.is_u128(&self.cx) {
else if arg_type.is_u128(self.cx) {
// Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
let array_type = self.context.new_array_type(None, arg_type, 3);
let result = self.current_func()
@ -872,7 +877,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// only break apart 128-bit ints if they're not natively supported
// TODO(antoyo): remove this if/when native 128-bit integers land in libgccjit
if value_type.is_u128(&self.cx) && !self.cx.supports_128bit_integers {
if value_type.is_u128(self.cx) && !self.cx.supports_128bit_integers {
let sixty_four = self.gcc_int(value_type, 64);
let right_shift = self.gcc_lshr(value, sixty_four);
let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type);
@ -995,7 +1000,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// Return `result_type`'s maximum or minimum value on overflow
// NOTE: convert the type to unsigned to have an unsigned shift.
let unsigned_type = result_type.to_unsigned(&self.cx);
let unsigned_type = result_type.to_unsigned(self.cx);
let shifted = self.gcc_lshr(
self.gcc_int_cast(lhs, unsigned_type),
self.gcc_int(unsigned_type, width as i64 - 1),

View file

@ -71,11 +71,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
let mask_ty = arg_tys[0];
let mut mask = match mask_ty.kind() {
let mut mask = match *mask_ty.kind() {
ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
ty::Array(elem, len)
if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8))
&& len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
== Some(expected_bytes) =>
{
@ -308,10 +308,9 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
})
.collect();
return Ok(bx.context.new_rvalue_from_vector(None, v_type, &elems));
} else {
// avoid the unnecessary truncation as an optimization.
return Ok(bx.context.new_bitcast(None, result, v_type));
}
// avoid the unnecessary truncation as an optimization.
return Ok(bx.context.new_bitcast(None, result, v_type));
}
// since gcc doesn't have vector shuffle methods available in non-patched builds, fallback to
// component-wise bitreverses if they're not available.
@ -354,8 +353,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
if name == sym::simd_shuffle {
// Make sure this is actually an array, since typeck only checks the length-suffixed
// version of this intrinsic.
let n: u64 = match args[2].layout.ty.kind() {
ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
let n: u64 = match *args[2].layout.ty.kind() {
ty::Array(ty, len) if matches!(*ty.kind(), ty::Uint(ty::UintTy::U32)) => {
len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
|| span_bug!(span, "could not evaluate shuffle index array length"),
)
@ -428,7 +427,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
m_len == v_len,
InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
);
match m_elem_ty.kind() {
match *m_elem_ty.kind() {
ty::Int(_) => {}
_ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
}
@ -461,13 +460,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
Unsupported,
}
let in_style = match in_elem.kind() {
let in_style = match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => Style::Int,
ty::Float(_) => Style::Float,
_ => Style::Unsupported,
};
let out_style = match out_elem.kind() {
let out_style = match *out_elem.kind() {
ty::Int(_) | ty::Uint(_) => Style::Int,
ty::Float(_) => Style::Float,
_ => Style::Unsupported,
@ -494,7 +493,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
macro_rules! arith_binary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name {
match in_elem.kind() {
match *in_elem.kind() {
$($(ty::$p(_))|* => {
return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
})*
@ -532,7 +531,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let sign_shift = bx.context.new_rvalue_from_int(elem_type, elem_size as i32 - 1);
let one = bx.context.new_rvalue_one(elem_type);
let mut shift = 0;
for i in 0..in_len {
let elem =
bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32));
@ -540,17 +538,16 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let masked = shifted & one;
result = result
| (bx.context.new_cast(None, masked, result_type)
<< bx.context.new_rvalue_from_int(result_type, shift));
shift += 1;
<< bx.context.new_rvalue_from_int(result_type, i as i32));
}
match ret_ty.kind() {
match *ret_ty.kind() {
ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
// Zero-extend iN to the bitmask type:
return Ok(result);
}
ty::Array(elem, len)
if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8))
&& len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
== Some(expected_bytes) =>
{
@ -589,7 +586,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
return Err(());
}};
}
let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
let (elem_ty_str, elem_ty) = if let ty::Float(ref f) = *in_elem.kind() {
let elem_ty = bx.cx.type_float_from_ty(*f);
match f.bit_width() {
32 => ("f", elem_ty),
@ -796,7 +793,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// This counts how many pointers
fn ptr_count(t: Ty<'_>) -> usize {
match t.kind() {
match *t.kind() {
ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
@ -804,7 +801,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match t.kind() {
match *t.kind() {
ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
@ -814,7 +811,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// to the element type of the first argument
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match element_ty1.kind() {
let (pointer_count, underlying_ty) = match *element_ty1.kind() {
ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
_ => {
require!(
@ -838,7 +835,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// The element type of the third argument must be a signed integer type of any width:
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
match element_ty2.kind() {
match *element_ty2.kind() {
ty::Int(_) => (),
_ => {
require!(
@ -910,7 +907,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// This counts how many pointers
fn ptr_count(t: Ty<'_>) -> usize {
match t.kind() {
match *t.kind() {
ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
@ -918,7 +915,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match t.kind() {
match *t.kind() {
ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
@ -929,7 +926,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match element_ty1.kind() {
let (pointer_count, underlying_ty) = match *element_ty1.kind() {
ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
(ptr_count(element_ty1), non_ptr(element_ty1))
}
@ -954,7 +951,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
assert_eq!(underlying_ty, non_ptr(element_ty0));
// The element type of the third argument must be a signed integer type of any width:
match element_ty2.kind() {
match *element_ty2.kind() {
ty::Int(_) => (),
_ => {
require!(
@ -1012,7 +1009,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
macro_rules! arith_unary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name {
match in_elem.kind() {
match *in_elem.kind() {
$($(ty::$p(_))|* => {
return Ok(bx.$call(args[0].immediate()))
})*
@ -1136,7 +1133,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
ret_ty == in_elem,
InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match in_elem.kind() {
return match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_op(args[0].immediate(), $vec_op);
if $ordered {
@ -1205,7 +1202,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
ret_ty == in_elem,
InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match in_elem.kind() {
return match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())),
ty::Float(_) => Ok(bx.$float_red(args[0].immediate())),
_ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
@ -1234,7 +1231,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
);
args[0].immediate()
} else {
match in_elem.kind() {
match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {}
_ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
span,
@ -1248,7 +1245,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
args[0].immediate()
};
return match in_elem.kind() {
return match *in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_op(input, $op);
Ok(if !$boolean {
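
In the bitmask loop earlier in this file, the separately maintained `shift` counter always equaled the loop index, so the diff drops it and shifts by `i` directly; clippy's explicit_counter_loop targets exactly this pattern. A self-contained version:

    fn to_bitmask(lanes: &[bool]) -> u64 {
        let mut result = 0u64;
        // The enumerate index replaces a hand-rolled counter that was
        // incremented once per iteration.
        for (i, lane) in lanes.iter().enumerate() {
            result |= (*lane as u64) << i;
        }
        result
    }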

View file

@ -29,6 +29,7 @@
#![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)]
#![deny(clippy::pattern_type_mismatch)]
#![allow(clippy::needless_lifetimes)]
extern crate rustc_apfloat;
extern crate rustc_ast;
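
The new #![allow(clippy::needless_lifetimes)] opts out of the lint that flags explicit lifetimes which elision could supply; presumably the codegen types' 'gcc/'tcx parameters read more clearly spelled out. The pattern the lint normally rewrites looks like this (illustrative only):

    // clippy::needless_lifetimes would suggest eliding 'a here and writing
    // `fn first_word(s: &str) -> &str`.
    fn first_word<'a>(s: &'a str) -> &'a str {
        s.split_whitespace().next().unwrap_or("")
    }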

View file

@ -90,7 +90,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
Abi::Uninhabited | Abi::Aggregate { .. } => {}
}
let name = match layout.ty.kind() {
let name = match *layout.ty.kind() {
// FIXME(eddyb) producing readable type names for trait objects can result
// in problematically distinct types due to HRTB and subtyping (see #47638).
// ty::Dynamic(..) |
@ -220,7 +220,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
// to fn_ptr_backend_type handle the on-stack attribute.
// TODO(antoyo): find a less hackish way to hande the on-stack attribute.
ty::FnPtr(sig) => {
cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
}
_ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
};