rustc_target: avoid using AbiAndPrefAlign where possible.

This commit is contained in:
Eduard-Mihai Burtescu 2018-09-09 01:16:45 +03:00
parent 3ce8d444af
commit 5b4747ded7
38 changed files with 311 additions and 334 deletions

View file

@@ -73,7 +73,7 @@ impl ArgAttributesExt for ArgAttributes {
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentAttr(llfn,
idx.as_uint(),
align.abi.bytes() as u32);
align.bytes() as u32);
}
regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
}
@@ -98,7 +98,7 @@ impl ArgAttributesExt for ArgAttributes {
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
idx.as_uint(),
align.abi.bytes() as u32);
align.bytes() as u32);
}
regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
}
@@ -204,7 +204,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
return;
}
if self.is_sized_indirect() {
OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
} else if self.is_unsized_indirect() {
bug!("unsized ArgType must be handled through store_fn_arg");
} else if let PassMode::Cast(cast) = self.mode {
@@ -214,7 +214,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
if can_store_through_cast_ptr {
let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx()));
let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
bx.store(val, cast_dst, self.layout.align);
bx.store(val, cast_dst, self.layout.align.abi);
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
@@ -242,7 +242,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
// ...and then memcpy it to the intended destination.
bx.memcpy(
dst.llval,
self.layout.align,
self.layout.align.abi,
llscratch,
scratch_align,
bx.cx().const_usize(self.layout.size.bytes()),
@@ -273,7 +273,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
OperandValue::Pair(next(), next()).store(bx, dst);
}
PassMode::Indirect(_, Some(_)) => {
OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst);
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
}
PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
self.store(bx, next(), dst);
@@ -545,7 +545,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
adjust_for_rust_scalar(&mut b_attrs,
b,
arg.layout,
a.value.size(cx).abi_align(b.value.align(cx)),
a.value.size(cx).align_to(b.value.align(cx).abi),
false);
arg.mode = PassMode::Pair(a_attrs, b_attrs);
return arg;

View file

@@ -19,7 +19,7 @@ use type_of::LayoutLlvmExt;
use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, AbiAndPrefAlign, Size, TyLayout};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::session::config;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
@@ -457,7 +457,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn alloca(&mut self, ty: &'ll Type, name: &str, align: AbiAndPrefAlign) -> &'ll Value {
fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn())
@@ -465,7 +465,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
bx.dynamic_alloca(ty, name, align)
}
fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: AbiAndPrefAlign) -> &'ll Value {
fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
self.count_insn("alloca");
unsafe {
let alloca = if name.is_empty() {
@@ -475,7 +475,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
llvm::LLVMBuildAlloca(self.llbuilder, ty,
name.as_ptr())
};
llvm::LLVMSetAlignment(alloca, align.abi.bytes() as c_uint);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
}
@@ -484,7 +484,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ty: &'ll Type,
len: &'ll Value,
name: &str,
align: AbiAndPrefAlign) -> &'ll Value {
align: Align) -> &'ll Value {
self.count_insn("alloca");
unsafe {
let alloca = if name.is_empty() {
@@ -494,16 +494,16 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
name.as_ptr())
};
llvm::LLVMSetAlignment(alloca, align.abi.bytes() as c_uint);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
}
fn load(&mut self, ptr: &'ll Value, align: AbiAndPrefAlign) -> &'ll Value {
fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
self.count_insn("load");
unsafe {
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
llvm::LLVMSetAlignment(load, align.abi.bytes() as c_uint);
llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
load
}
}
@@ -639,7 +639,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: AbiAndPrefAlign) -> &'ll Value {
fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
self.store_with_flags(val, ptr, align, MemFlags::empty())
}
@@ -647,7 +647,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
&mut self,
val: &'ll Value,
ptr: &'ll Value,
align: AbiAndPrefAlign,
align: Align,
flags: MemFlags,
) -> &'ll Value {
debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
@@ -658,7 +658,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let align = if flags.contains(MemFlags::UNALIGNED) {
1
} else {
align.abi.bytes() as c_uint
align.bytes() as c_uint
};
llvm::LLVMSetAlignment(store, align);
if flags.contains(MemFlags::VOLATILE) {
@@ -878,8 +878,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn memcpy(&mut self, dst: &'ll Value, dst_align: AbiAndPrefAlign,
src: &'ll Value, src_align: AbiAndPrefAlign,
fn memcpy(&mut self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
@@ -893,13 +893,13 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let dst = self.pointercast(dst, self.cx().type_i8p());
let src = self.pointercast(src, self.cx().type_i8p());
unsafe {
llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi.bytes() as c_uint,
src, src_align.abi.bytes() as c_uint, size, is_volatile);
llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
src, src_align.bytes() as c_uint, size, is_volatile);
}
}
fn memmove(&mut self, dst: &'ll Value, dst_align: AbiAndPrefAlign,
src: &'ll Value, src_align: AbiAndPrefAlign,
fn memmove(&mut self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memmove.
@@ -913,8 +913,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let dst = self.pointercast(dst, self.cx().type_i8p());
let src = self.pointercast(src, self.cx().type_i8p());
unsafe {
llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi.bytes() as c_uint,
src, src_align.abi.bytes() as c_uint, size, is_volatile);
llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
src, src_align.bytes() as c_uint, size, is_volatile);
}
}
@@ -923,14 +923,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ptr: &'ll Value,
fill_byte: &'ll Value,
size: &'ll Value,
align: AbiAndPrefAlign,
align: Align,
flags: MemFlags,
) {
let ptr_width = &self.cx().sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
let ptr = self.pointercast(ptr, self.cx().type_i8p());
let align = self.cx().const_u32(align.abi.bytes() as u32);
let align = self.cx().const_u32(align.bytes() as u32);
let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
}

View file

@@ -357,7 +357,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
offset: Size,
) -> PlaceRef<'tcx, &'ll Value> {
let init = const_alloc_to_llvm(self, alloc);
let base_addr = self.static_addr_of(init, layout.align, None);
let base_addr = self.static_addr_of(init, layout.align.abi, None);
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
self.static_bitcast(base_addr, self.type_i8p()),

View file

@@ -28,7 +28,7 @@ use value::Value;
use rustc::ty::{self, Ty};
use rustc_codegen_ssa::traits::*;
use rustc::ty::layout::{self, Size, Align, AbiAndPrefAlign, LayoutOf};
use rustc::ty::layout::{self, Size, Align, LayoutOf};
use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
@@ -89,20 +89,20 @@ pub fn codegen_static_initializer(
fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
gv: &'ll Value,
mut align: AbiAndPrefAlign) {
mut align: Align) {
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
// which can force it to be smaller. Rust doesn't support this yet.
if let Some(min) = cx.sess().target.target.options.min_global_align {
match Align::from_bits(min) {
Ok(min) => align = align.max(AbiAndPrefAlign::new(min)),
Ok(min) => align = align.max(min),
Err(err) => {
cx.sess().err(&format!("invalid minimum global alignment: {}", err));
}
}
}
unsafe {
llvm::LLVMSetAlignment(gv, align.abi.bytes() as u32);
llvm::LLVMSetAlignment(gv, align.bytes() as u32);
}
}
@@ -186,7 +186,7 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn static_addr_of_mut(
&self,
cv: &'ll Value,
align: AbiAndPrefAlign,
align: Align,
kind: Option<&str>,
) -> &'ll Value {
unsafe {
@@ -212,14 +212,14 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn static_addr_of(
&self,
cv: &'ll Value,
align: AbiAndPrefAlign,
align: Align,
kind: Option<&str>,
) -> &'ll Value {
if let Some(&gv) = self.const_globals.borrow().get(&cv) {
unsafe {
// Upgrade the alignment in cases where the same constant is used with different
// alignment requirements
let llalign = align.abi.bytes() as u32;
let llalign = align.bytes() as u32;
if llalign > llvm::LLVMGetAlignment(gv) {
llvm::LLVMSetAlignment(gv, llalign);
}

View file

@@ -35,7 +35,7 @@ use rustc_data_structures::fingerprint::Fingerprint;
use rustc::ty::Instance;
use common::CodegenCx;
use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt};
use rustc::ty::layout::{self, AbiAndPrefAlign, HasDataLayout, Integer, IntegerExt, LayoutOf,
use rustc::ty::layout::{self, Align, HasDataLayout, Integer, IntegerExt, LayoutOf,
PrimitiveExt, Size, TyLayout};
use rustc::session::config;
use rustc::util::nodemap::FxHashMap;
@@ -323,7 +323,7 @@ fn fixed_vec_metadata(
llvm::LLVMRustDIBuilderCreateArrayType(
DIB(cx),
size.bits(),
align.abi.bits() as u32,
align.bits() as u32,
element_type_metadata,
subscripts)
};
@@ -465,7 +465,7 @@ fn trait_pointer_metadata(
syntax_pos::DUMMY_SP),
offset: layout.fields.offset(0),
size: data_ptr_field.size,
align: data_ptr_field.align,
align: data_ptr_field.align.abi,
flags: DIFlags::FlagArtificial,
discriminant: None,
},
@@ -474,7 +474,7 @@ fn trait_pointer_metadata(
type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP),
offset: layout.fields.offset(1),
size: vtable_field.size,
align: vtable_field.align,
align: vtable_field.align.abi,
flags: DIFlags::FlagArtificial,
discriminant: None,
},
@@ -787,7 +787,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
DIB(cx),
name.as_ptr(),
size.bits(),
align.abi.bits() as u32,
align.bits() as u32,
encoding)
};
@@ -818,7 +818,7 @@ fn pointer_type_metadata(
DIB(cx),
pointee_type_metadata,
pointer_size.bits(),
pointer_align.abi.bits() as u32,
pointer_align.bits() as u32,
name.as_ptr())
}
}
@@ -923,7 +923,7 @@ struct MemberDescription<'ll> {
type_metadata: &'ll DIType,
offset: Size,
size: Size,
align: AbiAndPrefAlign,
align: Align,
flags: DIFlags,
discriminant: Option<u64>,
}
@@ -990,7 +990,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {
type_metadata: type_metadata(cx, field.ty, self.span),
offset: layout.fields.offset(i),
size: field.size,
align: field.align,
align: field.align.abi,
flags: DIFlags::FlagZero,
discriminant: None,
}
@@ -1113,7 +1113,7 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
type_metadata: type_metadata(cx, field.ty, self.span),
offset: Size::ZERO,
size: field.size,
align: field.align,
align: field.align.abi,
flags: DIFlags::FlagZero,
discriminant: None,
}
@@ -1226,7 +1226,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: self.layout.size,
align: self.layout.align,
align: self.layout.align.abi,
flags: DIFlags::FlagZero,
discriminant: None,
}
@@ -1265,7 +1265,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: self.layout.size,
align: self.layout.align,
align: self.layout.align.abi,
flags: DIFlags::FlagZero,
discriminant: Some(self.layout.ty.ty_adt_def().unwrap()
.discriminant_for_variant(cx.tcx, i)
@@ -1334,7 +1334,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: variant.size,
align: variant.align,
align: variant.align.abi,
flags: DIFlags::FlagZero,
discriminant: None,
}
@@ -1372,7 +1372,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: self.layout.size,
align: self.layout.align,
align: self.layout.align.abi,
flags: DIFlags::FlagZero,
discriminant: niche_value,
}
@@ -1675,7 +1675,7 @@ fn prepare_enum_metadata(
file_metadata,
UNKNOWN_LINE_NUMBER,
size.bits(),
align.abi.bits() as u32,
align.bits() as u32,
layout.fields.offset(0).bits(),
DIFlags::FlagArtificial,
discr_metadata))
@@ -1803,7 +1803,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>,
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
member_description.size.bits(),
member_description.align.abi.bits() as u32,
member_description.align.bits() as u32,
member_description.offset.bits(),
match member_description.discriminant {
None => None,
@@ -1851,7 +1851,7 @@ fn create_struct_stub(
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
struct_size.bits(),
struct_align.abi.bits() as u32,
struct_align.bits() as u32,
DIFlags::FlagZero,
None,
empty_array,
@@ -1889,7 +1889,7 @@ fn create_union_stub(
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
union_size.bits(),
union_align.abi.bits() as u32,
union_align.bits() as u32,
DIFlags::FlagZero,
Some(empty_array),
0, // RuntimeLang
@@ -1958,7 +1958,7 @@ pub fn create_global_var_metadata(
is_local_to_unit,
global,
None,
global_align.abi.bytes() as u32,
global_align.bytes() as u32,
);
}
}

View file

@@ -201,7 +201,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
cx.sess().opts.optimize != config::OptLevel::No,
DIFlags::FlagZero,
argument_index,
align.abi.bytes() as u32,
align.bytes() as u32,
)
};
source_loc::set_debug_location(self,

View file

@@ -110,7 +110,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
let name = &*tcx.item_name(def_id).as_str();
let llret_ty = self.cx().layout_of(ret_ty).llvm_type(self.cx());
let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);
let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi);
let simple = get_simple_intrinsic(self.cx(), name);
let llval = match name {
@@ -158,7 +158,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
}
"min_align_of" => {
let tp_ty = substs.type_at(0);
self.cx().const_usize(self.cx().align_of(tp_ty).abi.bytes())
self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
}
"min_align_of_val" => {
let tp_ty = substs.type_at(0);
@@ -167,12 +167,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
glue::size_and_align_of_dst(self, tp_ty, Some(meta));
llalign
} else {
self.cx().const_usize(self.cx().align_of(tp_ty).abi.bytes())
self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
}
}
"pref_align_of" => {
let tp_ty = substs.type_at(0);
self.cx().const_usize(self.cx().align_of(tp_ty).pref.bytes())
self.cx().const_usize(self.cx().layout_of(tp_ty).align.pref.bytes())
}
"type_name" => {
let tp_ty = substs.type_at(0);
@@ -261,7 +261,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
let align = if name == "unaligned_volatile_load" {
1
} else {
self.cx().align_of(tp_ty).abi.bytes() as u32
self.cx().align_of(tp_ty).bytes() as u32
};
unsafe {
llvm::LLVMSetAlignment(load, align);
@@ -815,7 +815,7 @@ fn try_intrinsic(
) {
if bx.cx().sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
bx.store(bx.cx().const_null(bx.cx().type_i8p()), dest, ptr_align);
} else if wants_msvc_seh(bx.cx().sess()) {
codegen_msvc_try(bx, func, data, local_ptr, dest);
@@ -890,7 +890,7 @@ fn codegen_msvc_try(
//
// More information can be found in libstd's seh.rs implementation.
let i64p = bx.cx().type_ptr_to(bx.cx().type_i64());
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(i64p, "slot", ptr_align);
bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
@@ -906,7 +906,7 @@ fn codegen_msvc_try(
let funclet = catchpad.catch_pad(cs, &[tydesc, bx.cx().const_i32(0), slot]);
let addr = catchpad.load(slot, ptr_align);
let i64_align = bx.tcx().data_layout.i64_align;
let i64_align = bx.tcx().data_layout.i64_align.abi;
let arg1 = catchpad.load(addr, i64_align);
let val1 = bx.cx().const_i32(1);
let gep1 = catchpad.inbounds_gep(addr, &[val1]);
@@ -923,7 +923,7 @@ fn codegen_msvc_try(
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[func, data, local_ptr], None);
let i32_align = bx.tcx().data_layout.i32_align;
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
@@ -982,7 +982,7 @@ fn codegen_gnu_try(
let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
catch.add_clause(vals, bx.cx().const_null(bx.cx().type_i8p()));
let ptr = catch.extract_value(vals, 0);
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let bitcast = catch.bitcast(local_ptr, bx.cx().type_ptr_to(bx.cx().type_i8p()));
catch.store(ptr, bitcast, ptr_align);
catch.ret(bx.cx().const_i32(1));
@@ -991,7 +991,7 @@ fn codegen_gnu_try(
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[func, data, local_ptr], None);
let i32_align = bx.tcx().data_layout.i32_align;
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
@@ -1436,7 +1436,7 @@ fn generic_simd_intrinsic(
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.cx().type_i32();
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi.bytes() as i32);
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
@@ -1536,7 +1536,7 @@ fn generic_simd_intrinsic(
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.cx().type_i32();
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi.bytes() as i32);
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {

View file

@@ -12,7 +12,7 @@ use abi::{FnType, FnTypeExt};
use common::*;
use rustc::hir;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, AbiAndPrefAlign, LayoutOf, Size, TyLayout};
use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
use rustc_target::abi::FloatTy;
use rustc_mir::monomorphize::item::DefPathBasedNames;
use type_::Type;
@@ -80,7 +80,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
match layout.fields {
layout::FieldPlacement::Union(_) => {
let fill = cx.type_padding_filler(layout.size, layout.align);
let fill = cx.type_padding_filler(layout.size, layout.align.abi);
let packed = false;
match name {
None => {
@@ -120,23 +120,23 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
let mut packed = false;
let mut offset = Size::ZERO;
let mut prev_effective_align = layout.align;
let mut prev_effective_align = layout.align.abi;
let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
for i in layout.fields.index_by_increasing_offset() {
let target_offset = layout.fields.offset(i as usize);
let field = layout.field(cx, i);
let effective_field_align = AbiAndPrefAlign::new(layout.align.abi
let effective_field_align = layout.align.abi
.min(field.align.abi)
.restrict_for_offset(target_offset));
packed |= effective_field_align.abi < field.align.abi;
.restrict_for_offset(target_offset);
packed |= effective_field_align < field.align.abi;
debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
effective_field_align: {}",
i, field, offset, target_offset, effective_field_align.abi.bytes());
i, field, offset, target_offset, effective_field_align.bytes());
assert!(target_offset >= offset);
let padding = target_offset - offset;
let padding_align = prev_effective_align.min(effective_field_align);
assert_eq!(offset.abi_align(padding_align) + padding, target_offset);
assert_eq!(offset.align_to(padding_align) + padding, target_offset);
result.push(cx.type_padding_filler( padding, padding_align));
debug!(" padding before: {:?}", padding);
@@ -151,7 +151,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
let padding = layout.size - offset;
let padding_align = prev_effective_align;
assert_eq!(offset.abi_align(padding_align) + padding, layout.size);
assert_eq!(offset.align_to(padding_align) + padding, layout.size);
debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
padding, offset, layout.size);
result.push(cx.type_padding_filler(padding, padding_align));
@@ -165,17 +165,17 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
pub fn align_of(&self, ty: Ty<'tcx>) -> AbiAndPrefAlign {
self.layout_of(ty).align
pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
self.layout_of(ty).align.abi
}
pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
self.layout_of(ty).size
}
pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, AbiAndPrefAlign) {
pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
let layout = self.layout_of(ty);
(layout.size, layout.align)
(layout.size, layout.align.abi)
}
}
@@ -197,7 +197,7 @@ pub enum PointerKind {
#[derive(Copy, Clone)]
pub struct PointeeInfo {
pub size: Size,
pub align: AbiAndPrefAlign,
pub align: Align,
pub safe: Option<PointerKind>,
}
@@ -333,7 +333,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
layout::Pointer => {
// If we know the alignment, pick something better than i8.
let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
cx.type_pointee_for_abi_align( pointee.align)
cx.type_pointee_for_align(pointee.align)
} else {
cx.type_i8()
};
@@ -377,7 +377,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
let offset = if index == 0 {
Size::ZERO
} else {
a.value.size(cx).abi_align(b.value.align(cx))
a.value.size(cx).align_to(b.value.align(cx).abi)
};
self.scalar_llvm_type_at(cx, scalar, offset)
}