diff --git a/src/intrinsics/llvm.rs b/src/intrinsics/llvm.rs
index 77ac46540a9b..a987e20b11a7 100644
--- a/src/intrinsics/llvm.rs
+++ b/src/intrinsics/llvm.rs
@@ -13,15 +13,11 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
     ret: CPlace<'tcx>,
     target: Option<BasicBlock>,
 ) {
-    intrinsic_match! {
-        fx, intrinsic, args,
-        _ => {
-            fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
-            crate::trap::trap_unimplemented(fx, intrinsic);
-        };
-
+    match intrinsic {
         // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
-        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd", (c a) {
+        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
+            intrinsic_args!(fx, args => (a); intrinsic);
+
             let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
             let lane_ty = fx.clif_type(lane_ty).unwrap();
             assert!(lane_count <= 32);
@@ -29,7 +25,8 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
             let mut res = fx.bcx.ins().iconst(types::I32, 0);
 
             for lane in (0..lane_count).rev() {
-                let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+                let a_lane =
+                    a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
 
                 // cast float to int
                 let a_lane = match lane_ty {
@@ -49,18 +46,33 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
 
             let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
             ret.write_cvalue(fx, res);
-        };
-        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd", (c x, c y, o kind) {
-            let kind = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
-            let flt_cc = match kind.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind)) {
+        }
+        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+            let (x, y, kind) = match args {
+                [x, y, kind] => (x, y, kind),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let x = codegen_operand(fx, x);
+            let y = codegen_operand(fx, y);
+            let kind = crate::constant::mir_operand_get_const_val(fx, kind)
+                .expect("llvm.x86.sse2.cmp.* kind not const");
+
+            let flt_cc = match kind
+                .try_to_bits(Size::from_bytes(1))
+                .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+            {
                 0 => FloatCC::Equal,
                 1 => FloatCC::LessThan,
                 2 => FloatCC::LessThanOrEqual,
                 7 => {
-                    unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
+                    unimplemented!(
+                        "Compares corresponding elements in `a` and `b` to see if neither is `NaN`."
+                    );
                 }
                 3 => {
-                    unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
+                    unimplemented!(
+                        "Compares corresponding elements in `a` and `b` to see if either is `NaN`."
+                    );
                 }
                 4 => FloatCC::NotEqual,
                 5 => {
@@ -79,50 +91,67 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
                 };
                 bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
             });
-        };
-        "llvm.x86.sse2.psrli.d", (c a, o imm8) {
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
-                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
-                    imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                    _ => fx.bcx.ins().iconst(types::I32, 0),
-                }
+        }
+        "llvm.x86.sse2.psrli.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
             });
-        };
-        "llvm.x86.sse2.pslli.d", (c a, o imm8) {
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
-                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
-                    imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                    _ => fx.bcx.ins().iconst(types::I32, 0),
-                }
+        }
+        "llvm.x86.sse2.pslli.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
             });
-        };
-        "llvm.x86.sse2.storeu.dq", (v mem_addr, c a) {
+        }
+        "llvm.x86.sse2.storeu.dq" => {
+            intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
+            let mem_addr = mem_addr.load_scalar(fx);
+
             // FIXME correctly handle the unalignment
             let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
             dest.write_cvalue(fx, a);
-        };
-        "llvm.x86.addcarry.64", (v c_in, c a, c b) {
-            llvm_add_sub(
-                fx,
-                BinOp::Add,
-                ret,
-                c_in,
-                a,
-                b
-            );
-        };
-        "llvm.x86.subborrow.64", (v b_in, c a, c b) {
-            llvm_add_sub(
-                fx,
-                BinOp::Sub,
-                ret,
-                b_in,
-                a,
-                b
-            );
-        };
+        }
+        "llvm.x86.addcarry.64" => {
+            intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
+            let c_in = c_in.load_scalar(fx);
+
+            llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+        }
+        "llvm.x86.subborrow.64" => {
+            intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
+            let b_in = b_in.load_scalar(fx);
+
+            llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+        }
+        _ => {
+            fx.tcx
+                .sess
+                .warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+            crate::trap::trap_unimplemented(fx, intrinsic);
+        }
     }
 
     let dest = target.expect("all llvm intrinsics used by stdlib should return");
diff --git a/src/intrinsics/mod.rs b/src/intrinsics/mod.rs
index 8d8db1da5818..de7695041d36 100644
--- a/src/intrinsics/mod.rs
+++ b/src/intrinsics/mod.rs
@@ -11,9 +11,6 @@ macro_rules! intrinsic_pat {
     (kw.$name:ident) => {
         kw::$name
     };
-    ($name:literal) => {
-        $name
-    };
 }
 
 macro_rules! intrinsic_arg {
@@ -26,6 +23,17 @@ macro_rules! intrinsic_arg {
     };
 }
 
+macro_rules! intrinsic_args {
+    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
+        #[allow(unused_parens)]
+        let ($($arg),*) = if let [$($arg),*] = $args {
+            ($(codegen_operand($fx, $arg)),*)
+        } else {
+            bug!("wrong number of args for intrinsic {}", $intrinsic);
+        };
+    }
+}
+
 macro_rules! intrinsic_match {
     ($fx:expr, $intrinsic:expr, $args:expr,
     _ => $unknown:block;
diff --git a/src/intrinsics/simd.rs b/src/intrinsics/simd.rs
index d1ca9edf2e0f..30e3d112594a 100644
--- a/src/intrinsics/simd.rs
+++ b/src/intrinsics/simd.rs
@@ -25,13 +25,10 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
     ret: CPlace<'tcx>,
     span: Span,
 ) {
-    intrinsic_match! {
-        fx, intrinsic, args,
-        _ => {
-            fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
-        };
+    match intrinsic {
+        sym::simd_cast => {
+            intrinsic_args!(fx, args => (a); intrinsic);
 
-        simd_cast, (c a) {
             if !a.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
                 return;
@@ -45,9 +42,11 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 
                 clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
             });
-        };
+        }
+
+        sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
 
-        simd_eq | simd_ne | simd_lt | simd_le | simd_gt | simd_ge, (c x, c y) {
             if !x.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
                 return;
@@ -57,7 +56,9 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
                 let res_lane = match (lane_ty.kind(), intrinsic) {
                     (ty::Uint(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
-                    (ty::Uint(_), sym::simd_ne) => fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_ne) => {
+                        fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
+                    }
                     (ty::Uint(_), sym::simd_lt) => {
                         fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane)
                     }
@@ -72,8 +73,12 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                     }
 
                     (ty::Int(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
-                    (ty::Int(_), sym::simd_ne) => fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane),
-                    (ty::Int(_), sym::simd_lt) => fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane),
+                    (ty::Int(_), sym::simd_ne) => {
+                        fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
+                    }
+                    (ty::Int(_), sym::simd_lt) => {
+                        fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane)
+                    }
                     (ty::Int(_), sym::simd_le) => {
                         fx.bcx.ins().icmp(IntCC::SignedLessThanOrEqual, x_lane, y_lane)
                     }
@@ -84,13 +89,21 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                         fx.bcx.ins().icmp(IntCC::SignedGreaterThanOrEqual, x_lane, y_lane)
                     }
 
-                    (ty::Float(_), sym::simd_eq) => fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane),
-                    (ty::Float(_), sym::simd_ne) => fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane),
-                    (ty::Float(_), sym::simd_lt) => fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane),
+                    (ty::Float(_), sym::simd_eq) => {
+                        fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane)
+                    }
+                    (ty::Float(_), sym::simd_ne) => {
+                        fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane)
+                    }
+                    (ty::Float(_), sym::simd_lt) => {
+                        fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane)
+                    }
                     (ty::Float(_), sym::simd_le) => {
                         fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, x_lane, y_lane)
                     }
-                    (ty::Float(_), sym::simd_gt) => fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane),
+                    (ty::Float(_), sym::simd_gt) => {
+                        fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane)
+                    }
                     (ty::Float(_), sym::simd_ge) => {
                         fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, x_lane, y_lane)
                     }
@@ -103,10 +116,19 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 let res_lane = fx.bcx.ins().bint(ty, res_lane);
                 fx.bcx.ins().ineg(res_lane)
             });
-        };
+        }
 
         // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
-        _ if intrinsic.as_str().starts_with("simd_shuffle"), (c x, c y, o idx) {
+        _ if intrinsic.as_str().starts_with("simd_shuffle") => {
+            let (x, y, idx) = match args {
+                [x, y, idx] => (x, y, idx),
+                _ => {
+                    bug!("wrong number of args for intrinsic {intrinsic}");
+                }
+            };
+            let x = codegen_operand(fx, x);
+            let y = codegen_operand(fx, y);
+
             if !x.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
                 return;
@@ -119,11 +141,13 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             // version of this intrinsic.
             let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
             match idx_ty.kind() {
-                ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
-                    len.try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
+                ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
+                    .try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
+                    .unwrap_or_else(|| {
                         span_bug!(span, "could not evaluate shuffle index array length")
-                    }).try_into().unwrap()
-                }
+                    })
+                    .try_into()
+                    .unwrap(),
                 _ => {
                     fx.tcx.sess.span_err(
                         span,
@@ -154,24 +178,30 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 
             let indexes = {
                 use rustc_middle::mir::interpret::*;
-                let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
+                let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
+                    .expect("simd_shuffle* idx not const");
 
                 let idx_bytes = match idx_const {
                     ConstValue::ByRef { alloc, offset } => {
-                        let size = Size::from_bytes(4 * ret_lane_count /* size_of([u32; ret_lane_count]) */);
+                        let size = Size::from_bytes(
+                            4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
+                        );
                         alloc.inner().get_bytes(fx, alloc_range(offset, size)).unwrap()
                     }
                     _ => unreachable!("{:?}", idx_const),
                 };
 
-                (0..ret_lane_count).map(|i| {
-                    let i = usize::try_from(i).unwrap();
-                    let idx = rustc_middle::mir::interpret::read_target_uint(
-                        fx.tcx.data_layout.endian,
-                        &idx_bytes[4*i.. 4*i + 4],
-                    ).expect("read_target_uint");
-                    u16::try_from(idx).expect("try_from u32")
-                }).collect::<Vec<u16>>()
+                (0..ret_lane_count)
+                    .map(|i| {
+                        let i = usize::try_from(i).unwrap();
+                        let idx = rustc_middle::mir::interpret::read_target_uint(
+                            fx.tcx.data_layout.endian,
+                            &idx_bytes[4 * i..4 * i + 4],
+                        )
+                        .expect("read_target_uint");
+                        u16::try_from(idx).expect("try_from u32")
+                    })
+                    .collect::<Vec<u16>>()
             };
 
             for &idx in &indexes {
@@ -187,43 +217,63 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
                 out_lane.write_cvalue(fx, in_lane);
             }
-        };
+        }
+
+        sym::simd_insert => {
+            let (base, idx, val) = match args {
+                [base, idx, val] => (base, idx, val),
+                _ => {
+                    bug!("wrong number of args for intrinsic {intrinsic}");
+                }
+            };
+            let base = codegen_operand(fx, base);
+            let val = codegen_operand(fx, val);
 
-        simd_insert, (c base, o idx, c val) {
             // FIXME validate
-            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+            let idx_const = if let Some(idx_const) =
+                crate::constant::mir_operand_get_const_val(fx, idx)
+            {
                 idx_const
             } else {
-                fx.tcx.sess.span_fatal(
-                    span,
-                    "Index argument for `simd_insert` is not a constant",
-                );
+                fx.tcx.sess.span_fatal(span, "Index argument for `simd_insert` is not a constant");
             };
 
-            let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+            let idx = idx_const
+                .try_to_bits(Size::from_bytes(4 /* u32*/))
+                .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
             let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
             if idx >= lane_count.into() {
-                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
+                fx.tcx.sess.span_fatal(
+                    fx.mir.span,
+                    &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count),
+                );
             }
 
             ret.write_cvalue(fx, base);
             let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
             ret_lane.write_cvalue(fx, val);
-        };
+        }
+
+        sym::simd_extract => {
+            let (v, idx) = match args {
+                [v, idx] => (v, idx),
+                _ => {
+                    bug!("wrong number of args for intrinsic {intrinsic}");
+                }
+            };
+            let v = codegen_operand(fx, v);
 
-        simd_extract, (c v, o idx) {
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
                 return;
             }
 
-            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+            let idx_const = if let Some(idx_const) =
+                crate::constant::mir_operand_get_const_val(fx, idx)
+            {
                 idx_const
             } else {
-                fx.tcx.sess.span_warn(
-                    span,
-                    "Index argument for `simd_extract` is not a constant",
-                );
+                fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
                 let res = crate::trap::trap_unimplemented_ret_value(
                     fx,
                     ret.layout(),
@@ -233,89 +283,105 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 return;
             };
 
-            let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+            let idx = idx_const
+                .try_to_bits(Size::from_bytes(4 /* u32*/))
+                .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
             let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
             if idx >= lane_count.into() {
-                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
+                fx.tcx.sess.span_fatal(
+                    fx.mir.span,
+                    &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count),
+                );
             }
 
             let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
             ret.write_cvalue(fx, ret_lane);
-        };
+        }
+
+        sym::simd_neg => {
+            intrinsic_args!(fx, args => (a); intrinsic);
 
-        simd_neg, (c a) {
             if !a.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
                 return;
            }
 
-            simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
-                match lane_ty.kind() {
+            simd_for_each_lane(
+                fx,
+                a,
+                ret,
+                &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
                    ty::Int(_) => fx.bcx.ins().ineg(lane),
                    ty::Float(_) => fx.bcx.ins().fneg(lane),
                    _ => unreachable!(),
-                }
-            });
-        };
+                },
+            );
+        }
 
-        simd_add | simd_sub | simd_mul | simd_div | simd_rem
-        | simd_shl | simd_shr | simd_and | simd_or | simd_xor, (c x, c y) {
-            if !x.layout().ty.is_simd() {
-                report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
-                return;
-            }
+        sym::simd_add
+        | sym::simd_sub
+        | sym::simd_mul
+        | sym::simd_div
+        | sym::simd_rem
+        | sym::simd_shl
+        | sym::simd_shr
+        | sym::simd_and
+        | sym::simd_or
+        | sym::simd_xor => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
 
             // FIXME use vector instructions when possible
-            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| match (
-                lane_ty.kind(),
-                intrinsic,
-            ) {
-                (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
-                (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
-                (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
-                (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
-                (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
+            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+                match (lane_ty.kind(), intrinsic) {
+                    (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
 
-                (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
-                (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
-                (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
-                (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
-                (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
 
-                (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
-                (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
-                (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
-                (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
-                (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
-                    "fmodf",
-                    vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
-                    vec![AbiParam::new(types::F32)],
-                    &[x_lane, y_lane],
-                )[0],
-                (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
-                    "fmod",
-                    vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
-                    vec![AbiParam::new(types::F64)],
-                    &[x_lane, y_lane],
-                )[0],
+                    (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
+                    (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
+                    (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
+                    (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
+                    (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
+                        "fmodf",
+                        vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
+                        vec![AbiParam::new(types::F32)],
+                        &[x_lane, y_lane],
+                    )[0],
+                    (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
+                        "fmod",
+                        vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
+                        vec![AbiParam::new(types::F64)],
+                        &[x_lane, y_lane],
+                    )[0],
 
-                (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
-                (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
-                (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
-                (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
-                (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
+                    (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
 
-                (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
-                (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
-                (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
-                (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
-                (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
+                    (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
 
-                _ => unreachable!(),
+                    _ => unreachable!(),
+                }
             });
-        };
+        }
+
+        sym::simd_fma => {
+            intrinsic_args!(fx, args => (a, b, c); intrinsic);
 
-        simd_fma, (c a, c b, c c) {
             if !a.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
                 return;
@@ -333,16 +399,22 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 let c_lane = c.value_lane(fx, lane);
 
                 let res_lane = match lane_ty.kind() {
-                    ty::Float(FloatTy::F32) => fx.easy_call("fmaf", &[a_lane, b_lane, c_lane], lane_ty),
-                    ty::Float(FloatTy::F64) => fx.easy_call("fma", &[a_lane, b_lane, c_lane], lane_ty),
+                    ty::Float(FloatTy::F32) => {
+                        fx.easy_call("fmaf", &[a_lane, b_lane, c_lane], lane_ty)
+                    }
+                    ty::Float(FloatTy::F64) => {
+                        fx.easy_call("fma", &[a_lane, b_lane, c_lane], lane_ty)
+                    }
                     _ => unreachable!(),
                 };
 
                 ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
             }
-        };
+        }
+
+        sym::simd_fmin | sym::simd_fmax => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
 
-        simd_fmin | simd_fmax, (c x, c y) {
             if !x.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
                 return;
@@ -351,7 +423,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             // FIXME use vector instructions when possible
             simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
                 match lane_ty.kind() {
-                    ty::Float(_) => {},
+                    ty::Float(_) => {}
                     _ => unreachable!("{:?}", lane_ty),
                 }
                 match intrinsic {
@@ -360,16 +432,21 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                     _ => unreachable!(),
                 }
             });
-        };
+        }
+
+        sym::simd_round => {
+            intrinsic_args!(fx, args => (a); intrinsic);
 
-        simd_round, (c a) {
             if !a.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
                 return;
            }
 
-            simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
-                match lane_ty.kind() {
+            simd_for_each_lane(
+                fx,
+                a,
+                ret,
+                &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
                    ty::Float(FloatTy::F32) => fx.lib_call(
                        "roundf",
                        vec![AbiParam::new(types::F32)],
@@ -383,11 +460,13 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                        &[lane],
                    )[0],
                    _ => unreachable!("{:?}", lane_ty),
-                }
-            });
-        };
+                },
+            );
+        }
+
+        sym::simd_fabs | sym::simd_fsqrt | sym::simd_ceil | sym::simd_floor | sym::simd_trunc => {
+            intrinsic_args!(fx, args => (a); intrinsic);
 
-        simd_fabs | simd_fsqrt | simd_ceil | simd_floor | simd_trunc, (c a) {
             if !a.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
                 return;
@@ -395,7 +474,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 
             simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
                 match lane_ty.kind() {
-                    ty::Float(_) => {},
+                    ty::Float(_) => {}
                     _ => unreachable!("{:?}", lane_ty),
                 }
                 match intrinsic {
@@ -407,9 +486,12 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                     _ => unreachable!(),
                 }
             });
-        };
+        }
+
+        sym::simd_reduce_add_ordered | sym::simd_reduce_add_unordered => {
+            intrinsic_args!(fx, args => (v, acc); intrinsic);
+            let acc = acc.load_scalar(fx);
 
-        simd_reduce_add_ordered | simd_reduce_add_unordered, (c v, v acc) {
             // FIXME there must be no acc param for integer vectors
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
@@ -423,9 +505,12 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                     fx.bcx.ins().iadd(a, b)
                 }
             });
-        };
+        }
+
+        sym::simd_reduce_mul_ordered | sym::simd_reduce_mul_unordered => {
+            intrinsic_args!(fx, args => (v, acc); intrinsic);
+            let acc = acc.load_scalar(fx);
 
-        simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v, v acc) {
             // FIXME there must be no acc param for integer vectors
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
@@ -439,54 +524,66 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                     fx.bcx.ins().imul(a, b)
                 }
            });
-        };
+        }
+
+        sym::simd_reduce_all => {
+            intrinsic_args!(fx, args => (v); intrinsic);
 
-        simd_reduce_all, (c v) {
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
                 return;
            }
 
             simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
-        };
+        }
+
+        sym::simd_reduce_any => {
+            intrinsic_args!(fx, args => (v); intrinsic);
 
-        simd_reduce_any, (c v) {
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
                 return;
            }
 
             simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
-        };
+        }
+
+        sym::simd_reduce_and => {
+            intrinsic_args!(fx, args => (v); intrinsic);
 
-        simd_reduce_and, (c v) {
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
                 return;
            }
 
             simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
-        };
+        }
+
+        sym::simd_reduce_or => {
+            intrinsic_args!(fx, args => (v); intrinsic);
 
-        simd_reduce_or, (c v) {
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
                 return;
            }
 
             simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
-        };
+        }
+
+        sym::simd_reduce_xor => {
+            intrinsic_args!(fx, args => (v); intrinsic);
 
-        simd_reduce_xor, (c v) {
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
                 return;
            }
 
             simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
-        };
+        }
+
+        sym::simd_reduce_min => {
+            intrinsic_args!(fx, args => (v); intrinsic);
 
-        simd_reduce_min, (c v) {
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
                 return;
@@ -501,9 +598,11 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 };
                 fx.bcx.ins().select(lt, a, b)
            });
-        };
+        }
+
+        sym::simd_reduce_max => {
+            intrinsic_args!(fx, args => (v); intrinsic);
 
-        simd_reduce_max, (c v) {
             if !v.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
                 return;
@@ -518,9 +617,11 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 };
                 fx.bcx.ins().select(gt, a, b)
            });
-        };
+        }
+
+        sym::simd_select => {
+            intrinsic_args!(fx, args => (m, a, b); intrinsic);
 
-        simd_select, (c m, c a, c b) {
             if !m.layout().ty.is_simd() {
                 report_simd_type_validation_error(fx, intrinsic, span, m.layout().ty);
                 return;
@@ -540,15 +641,19 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 let b_lane = b.value_lane(fx, lane).load_scalar(fx);
 
                 let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
-                let res_lane = CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+                let res_lane =
+                    CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
                 ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
             }
-        };
+        }
 
         // simd_saturating_*
         // simd_bitmask
        // simd_scatter
        // simd_gather
+        _ => {
+            fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+        }
     }
 }
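Note on the new `intrinsic_args!` helper added in src/intrinsics/mod.rs above: each call site expands into a slice pattern over `args` that runs `codegen_operand` on every operand and hits `bug!` when the arity does not match, which is what lets the `match intrinsic` arms stay flat. The sketch below mirrors that expansion with simplified stand-in types so it compiles outside the compiler; `Operand`, `Value` and the free-standing `codegen_operand` are placeholders (the `$fx` parameter is dropped and `panic!` stands in for `bug!`), not the real backend API.

```rust
// Standalone sketch of the `intrinsic_args!` expansion. `Operand`, `Value` and
// `codegen_operand` are simplified stand-ins, not the real cranelift backend types.
#[derive(Debug)]
struct Operand(i32);

#[derive(Debug)]
struct Value(i32);

fn codegen_operand(op: &Operand) -> Value {
    // The real helper lowers a MIR operand to a CValue; here we just copy the payload.
    Value(op.0)
}

macro_rules! intrinsic_args {
    ($args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($arg)),*)
        } else {
            // The real macro calls `bug!`; a panic stands in for it here.
            panic!("wrong number of args for intrinsic {}", $intrinsic);
        };
    };
}

fn main() {
    let args: &[Operand] = &[Operand(1), Operand(2)];
    // Expands to a refutable slice pattern: binds `x` and `y`, or panics on an arity mismatch.
    intrinsic_args!(args => (x, y); "llvm.x86.sse2.cmp.ps");
    println!("x = {:?}, y = {:?}", x, y);
}
```

Binding the operands through one slice pattern keeps the arity check and the per-operand lowering in a single place, so each intrinsic arm in the diff only has to name its operands.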